Reintroduce reconcile skip

Signed-off-by: Sunny <darkowlzz@protonmail.com>
Sunny 2022-05-17 17:26:48 +05:30
parent 71d3870e0a
commit 09db10ad73
GPG Key ID: 9F3D25DDFF7FA3CF
7 changed files with 564 additions and 173 deletions

View File

@@ -211,6 +211,17 @@ type GitRepositoryStatus struct {
 	// +optional
 	IncludedArtifacts []*Artifact `json:"includedArtifacts,omitempty"`

+	// ContentConfigChecksum is a checksum of all the configurations related to
+	// the content of the source artifact:
+	//  - .spec.ignore
+	//  - .spec.recurseSubmodules
+	//  - .spec.included and the checksum of the included artifacts
+	// observed in .status.observedGeneration version of the object. This can
+	// be used to determine if the content of the included repository has
+	// changed.
+	// It has the format of `<algo>:<checksum>`, for example: `sha256:<checksum>`.
+	// +optional
+	ContentConfigChecksum string `json:"contentConfigChecksum,omitempty"`
+
 	meta.ReconcileRequestStatus `json:",inline"`
 }

View File

@@ -653,6 +653,14 @@ spec:
                   - type
                   type: object
                 type: array
+              contentConfigChecksum:
+                description: 'ContentConfigChecksum is a checksum of all the configurations
+                  related to the content of the source artifact: - .spec.ignore -
+                  .spec.recurseSubmodules - .spec.included and the checksum of the
+                  included artifacts observed in .status.observedGeneration version
+                  of the object. This can be used to determine if the content of the
+                  included repository has changed. It has the format of `<algo>:<checksum>`, for example: `sha256:<checksum>`.'
+                type: string
               includedArtifacts:
                 description: IncludedArtifacts contains a list of the last successfully
                   included Artifacts as instructed by GitRepositorySpec.Include.

View File

@@ -18,10 +18,12 @@ package controllers

 import (
 	"context"
+	"crypto/sha256"
 	"errors"
 	"fmt"
 	"os"
 	"path/filepath"
+	"strconv"
 	"strings"
 	"time"
@@ -361,8 +363,15 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context,
 // reconcileSource ensures the upstream Git repository and reference can be
 // cloned and checked out using the specified configuration, and observes its
-// state.
+// state. It also checks if the included repositories are available for use.
 //
+// The included repositories are fetched and their metadata are stored. In case
+// one of the included repositories isn't ready, it records
+// v1beta2.IncludeUnavailableCondition=True and returns early. When all the
+// included repositories are ready, it removes
+// v1beta2.IncludeUnavailableCondition from the object.
+// When the included artifactSet differs from the current set in the Status of
+// the object, it marks the object with v1beta2.ArtifactOutdatedCondition=True.
 // The repository is cloned to the given dir, using the specified configuration
 // to check out the reference. In case of an error during this process
 // (including transient errors), it records v1beta2.FetchFailedCondition=True
 // and returns early.
@@ -377,8 +386,13 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context,
 // it records v1beta2.SourceVerifiedCondition=True.
 // When all the above is successful, the given Commit pointer is set to the
 // commit of the checked out Git repository.
+//
+// If the optimized git clone feature is enabled, it checks if the remote repo
+// and the local artifact are on the same revision, and no other source content
+// related configurations have changed since the last reconciliation. If so, it
+// short-circuits the whole reconciliation with an early return.
 func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context,
-	obj *sourcev1.GitRepository, commit *git.Commit, _ *artifactSet, dir string) (sreconcile.Result, error) {
+	obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) {
 	// Configure authentication strategy to access the source
 	var authOpts *git.AuthOptions
 	var err error
@@ -415,38 +429,6 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context,
 		return sreconcile.ResultEmpty, e
 	}

-	// Configure checkout strategy
-	checkoutOpts := git.CheckoutOptions{RecurseSubmodules: obj.Spec.RecurseSubmodules}
-	if ref := obj.Spec.Reference; ref != nil {
-		checkoutOpts.Branch = ref.Branch
-		checkoutOpts.Commit = ref.Commit
-		checkoutOpts.Tag = ref.Tag
-		checkoutOpts.SemVer = ref.SemVer
-	}
-
-	if val, ok := r.features[features.OptimizedGitClones]; ok && val {
-		// Only if the object has an existing artifact in storage, attempt to
-		// short-circuit clone operation. reconcileStorage has already verified
-		// that the artifact exists.
-		if conditions.IsTrue(obj, sourcev1.ArtifactInStorageCondition) {
-			if artifact := obj.GetArtifact(); artifact != nil {
-				checkoutOpts.LastRevision = artifact.Revision
-			}
-		}
-	}
-
-	checkoutStrategy, err := strategy.CheckoutStrategyForImplementation(ctx,
-		git.Implementation(obj.Spec.GitImplementation), checkoutOpts)
-	if err != nil {
-		e := &serror.Stalling{
-			Err:    fmt.Errorf("failed to configure checkout strategy for Git implementation '%s': %w", obj.Spec.GitImplementation, err),
-			Reason: sourcev1.GitOperationFailedReason,
-		}
-		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
-		// Do not return err as recovery without changes is impossible
-		return sreconcile.ResultEmpty, e
-	}
-
 	repositoryURL := obj.Spec.URL
 	// managed GIT transport only affects the libgit2 implementation
 	if managed.Enabled() && obj.Spec.GitImplementation == sourcev1.LibGit2Implementation {
@@ -474,53 +456,83 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context,
 		}
 	}

-	// Checkout HEAD of reference in object
-	gitCtx, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
-	defer cancel()
-
-	c, err := checkoutStrategy.Checkout(gitCtx, dir, repositoryURL, authOpts)
+	// Fetch the included artifact metadata.
+	artifacts, err := r.fetchIncludes(ctx, obj)
+	if err != nil {
+		return sreconcile.ResultEmpty, err
+	}
+
+	// Observe if the artifacts still match the previous included ones
+	if includes.Diff(obj.Status.IncludedArtifacts) {
+		message := fmt.Sprintf("included artifacts differ from last observed includes")
+		conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "IncludeChange", message)
+		conditions.MarkReconciling(obj, "IncludeChange", message)
+	}
+
+	// Persist the ArtifactSet.
+	*includes = *artifacts
+
+	var optimizedClone bool
+	if val, ok := r.features[features.OptimizedGitClones]; ok && val {
+		optimizedClone = true
+	}
+
+	c, err := r.gitCheckout(ctx, obj, repositoryURL, authOpts, dir, optimizedClone)
 	if err != nil {
 		e := serror.NewGeneric(
 			fmt.Errorf("failed to checkout and determine revision: %w", err),
 			sourcev1.GitOperationFailedReason,
 		)
 		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
-		// Coin flip on transient or persistent error, return error and hope for the best
 		return sreconcile.ResultEmpty, e
 	}
-
 	// Assign the commit to the shared commit reference.
 	*commit = *c

 	// If it's a partial commit obtained from an existing artifact, check if the
 	// reconciliation can be skipped if other configurations have not changed.
 	if !git.IsConcreteCommit(*commit) {
-		ctrl.LoggerFrom(ctx).V(logger.DebugLevel).Info(fmt.Sprintf(
-			"no changes since last reconciliation, observed revision '%s'", commit.String()))
-
-		// Remove the target directory, as CopyToPath() renames another
-		// directory to which the artifact is unpacked into the target
-		// directory. At this point, the target directory is empty, safe to
-		// remove.
-		os.RemoveAll(dir)
-		if err := r.Storage.CopyToPath(obj.GetArtifact(), "/", dir); err != nil {
-			e := serror.NewGeneric(
-				fmt.Errorf("failed to copy existing artifact to source dir: %w", err),
-				sourcev1.CopyOperationFailedReason,
+		// Calculate content configuration checksum.
+		if r.calculateContentConfigChecksum(obj, includes) == obj.Status.ContentConfigChecksum {
+			ctrl.LoggerFrom(ctx).V(logger.DebugLevel).Info(fmt.Sprintf(
+				"no changes since last reconciliation, observed revision '%s'", commit.String()))
+			ge := serror.NewGeneric(
+				fmt.Errorf("no changes since last reconciliation: observed revision '%s'",
+					commit.String()), sourcev1.GitOperationSucceedReason,
 			)
-			conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
+			ge.Notification = false
+			ge.Ignore = true
+			ge.Event = corev1.EventTypeNormal
+			// Remove any stale fetch failed condition.
+			conditions.Delete(obj, sourcev1.FetchFailedCondition)
+			// IMPORTANT: This must be set to ensure that the observed
+			// generation of this condition is updated. In case of full
+			// reconciliation reconcileArtifact() ensures that it's set at the
+			// very end.
+			conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
+				"stored artifact for revision '%s'", commit.String())
+			// TODO: Find out if such condition setting is needed when commit
+			// signature verification is enabled.
+			return sreconcile.ResultEmpty, ge
+		}
+
+		// If we can't skip the reconciliation, checkout again without any
+		// optimization.
+		c, err := r.gitCheckout(ctx, obj, repositoryURL, authOpts, dir, false)
+		if err != nil {
+			e := serror.NewGeneric(
+				fmt.Errorf("failed to checkout and determine revision: %w", err),
+				sourcev1.GitOperationFailedReason,
+			)
+			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
 			return sreconcile.ResultEmpty, e
 		}
-		conditions.Delete(obj, sourcev1.FetchFailedCondition)
-		conditions.Delete(obj, sourcev1.StorageOperationFailedCondition)
-		return sreconcile.ResultSuccess, nil
+		*commit = *c
 	}
-
 	ctrl.LoggerFrom(ctx).V(logger.DebugLevel).Info("git repository checked out", "url", obj.Spec.URL, "revision", commit.String())
 	conditions.Delete(obj, sourcev1.FetchFailedCondition)
+	// In case no-op clone resulted in a failure and in the subsequent
+	// reconciliation a new remote revision was observed, delete any stale
+	// StorageOperationFailedCondition.
+	conditions.Delete(obj, sourcev1.StorageOperationFailedCondition)

 	// Verify commit signature
 	if result, err := r.verifyCommitSignature(ctx, obj, *commit); err != nil || result == sreconcile.ResultEmpty {
@@ -541,21 +553,27 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context,
 //
 // The inspection of the given data to the object is deferred, ensuring any
 // stale observations like v1beta2.ArtifactOutdatedCondition are removed.
-// If the given Artifact and/or artifactSet (includes) do not differ from the
-// object's current, it returns early.
+// If the given Artifact and/or artifactSet (includes) and the content config
+// checksum do not differ from the object's current, it returns early.
 // Source ignore patterns are loaded, and the given directory is archived while
 // taking these patterns into account.
-// On a successful archive, the Artifact and Includes in the Status of the
-// object are set, and the symlink in the Storage is updated to its path.
+// On a successful archive, the Artifact, Includes and new content config
+// checksum in the Status of the object are set, and the symlink in the Storage
+// is updated to its path.
 func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context,
 	obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) {
 	// Create potential new artifact with current available metadata
 	artifact := r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), commit.String(), fmt.Sprintf("%s.tar.gz", commit.Hash.String()))

+	// Calculate the content config checksum.
+	ccc := r.calculateContentConfigChecksum(obj, includes)
+
 	// Set the ArtifactInStorageCondition if there's no drift.
 	defer func() {
-		if obj.GetArtifact().HasRevision(artifact.Revision) && !includes.Diff(obj.Status.IncludedArtifacts) {
+		if obj.GetArtifact().HasRevision(artifact.Revision) &&
+			!includes.Diff(obj.Status.IncludedArtifacts) &&
+			obj.Status.ContentConfigChecksum == ccc {
 			conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
 			conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
 				"stored artifact for revision '%s'", artifact.Revision)
@@ -563,7 +581,9 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context,
 	}()

 	// The artifact is up-to-date
-	if obj.GetArtifact().HasRevision(artifact.Revision) && !includes.Diff(obj.Status.IncludedArtifacts) {
+	if obj.GetArtifact().HasRevision(artifact.Revision) &&
+		!includes.Diff(obj.Status.IncludedArtifacts) &&
+		obj.Status.ContentConfigChecksum == ccc {
 		r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision)
 		return sreconcile.ResultSuccess, nil
 	}
@@ -629,6 +649,7 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context,
 	// Record it on the object
 	obj.Status.Artifact = artifact.DeepCopy()
 	obj.Status.IncludedArtifacts = *includes
+	obj.Status.ContentConfigChecksum = ccc

 	// Update symlink on a "best effort" basis
 	url, err := r.Storage.Symlink(artifact, "latest.tar.gz")
@@ -656,7 +677,6 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context,
 func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context,
 	obj *sourcev1.GitRepository, _ *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) {
-	artifacts := make(artifactSet, len(obj.Spec.Include))
 	for i, incl := range obj.Spec.Include {
 		// Do this first as it is much cheaper than copy operations
 		toPath, err := securejoin.SecureJoin(dir, incl.GetToPath())
@@ -665,56 +685,137 @@ func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context,
 				Err:    fmt.Errorf("path calculation for include '%s' failed: %w", incl.GitRepositoryRef.Name, err),
 				Reason: "IllegalPath",
 			}
-			conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
+			conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
 			return sreconcile.ResultEmpty, e
 		}

-		// Retrieve the included GitRepository
-		dep := &sourcev1.GitRepository{}
-		if err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: incl.GitRepositoryRef.Name}, dep); err != nil {
-			e := &serror.Event{
-				Err:    fmt.Errorf("could not get resource for include '%s': %w", incl.GitRepositoryRef.Name, err),
-				Reason: "NotFound",
-			}
-			conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
-			return sreconcile.ResultEmpty, e
-		}
-
-		// Confirm include has an artifact
-		if dep.GetArtifact() == nil {
-			e := &serror.Event{
-				Err:    fmt.Errorf("no artifact available for include '%s'", incl.GitRepositoryRef.Name),
-				Reason: "NoArtifact",
-			}
-			conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
-			return sreconcile.ResultEmpty, e
+		// Get artifact at the same include index. The artifactSet is created
+		// such that the index of artifactSet matches with the index of Include.
+		// Hence, index is used here to pick the associated artifact from
+		// includes.
+		var artifact *sourcev1.Artifact
+		for j, art := range *includes {
+			if i == j {
+				artifact = art
+			}
 		}

-		// Copy artifact (sub)contents to configured directory
-		if err := r.Storage.CopyToPath(dep.GetArtifact(), incl.GetFromPath(), toPath); err != nil {
+		// Copy artifact (sub)contents to configured directory.
+		if err := r.Storage.CopyToPath(artifact, incl.GetFromPath(), toPath); err != nil {
 			e := &serror.Event{
 				Err:    fmt.Errorf("failed to copy '%s' include from %s to %s: %w", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err),
 				Reason: "CopyFailure",
 			}
-			conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
+			conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
 			return sreconcile.ResultEmpty, e
 		}
-		artifacts[i] = dep.GetArtifact().DeepCopy()
 	}
-
-	// We now know all includes are available
-	conditions.Delete(obj, sourcev1.IncludeUnavailableCondition)
-
-	// Observe if the artifacts still match the previous included ones
-	if artifacts.Diff(obj.Status.IncludedArtifacts) {
-		message := fmt.Sprintf("included artifacts differ from last observed includes")
-		conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "IncludeChange", message)
-		conditions.MarkReconciling(obj, "IncludeChange", message)
-	}
-
-	// Persist the artifactSet.
-	*includes = artifacts
+	conditions.Delete(obj, sourcev1.StorageOperationFailedCondition)
 	return sreconcile.ResultSuccess, nil
 }
+
+// gitCheckout builds checkout options with the given configurations and
+// performs a git checkout.
+func (r *GitRepositoryReconciler) gitCheckout(ctx context.Context,
+	obj *sourcev1.GitRepository, repoURL string, authOpts *git.AuthOptions, dir string, optimized bool) (*git.Commit, error) {
+	// Configure checkout strategy.
+	checkoutOpts := git.CheckoutOptions{RecurseSubmodules: obj.Spec.RecurseSubmodules}
+	if ref := obj.Spec.Reference; ref != nil {
+		checkoutOpts.Branch = ref.Branch
+		checkoutOpts.Commit = ref.Commit
+		checkoutOpts.Tag = ref.Tag
+		checkoutOpts.SemVer = ref.SemVer
+	}
+
+	// Only if the object has an existing artifact in storage, attempt to
+	// short-circuit clone operation. reconcileStorage has already verified
+	// that the artifact exists.
+	if optimized && conditions.IsTrue(obj, sourcev1.ArtifactInStorageCondition) {
+		if artifact := obj.GetArtifact(); artifact != nil {
+			checkoutOpts.LastRevision = artifact.Revision
+		}
+	}
+
+	checkoutStrategy, err := strategy.CheckoutStrategyForImplementation(ctx,
+		git.Implementation(obj.Spec.GitImplementation), checkoutOpts)
+	if err != nil {
+		e := &serror.Stalling{
+			Err:    fmt.Errorf("failed to configure checkout strategy for Git implementation '%s': %w", obj.Spec.GitImplementation, err),
+			Reason: sourcev1.GitOperationFailedReason,
+		}
+		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
+		// Do not return err as recovery without changes is impossible.
+		return nil, e
+	}
+
+	// Checkout HEAD of reference in object
+	gitCtx, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
+	defer cancel()
+	return checkoutStrategy.Checkout(gitCtx, dir, repoURL, authOpts)
+}
+
+// fetchIncludes fetches artifact metadata of all the included repos.
+func (r *GitRepositoryReconciler) fetchIncludes(ctx context.Context, obj *sourcev1.GitRepository) (*artifactSet, error) {
+	artifacts := make(artifactSet, len(obj.Spec.Include))
+	for i, incl := range obj.Spec.Include {
+		// Retrieve the included GitRepository.
+		dep := &sourcev1.GitRepository{}
+		if err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: incl.GitRepositoryRef.Name}, dep); err != nil {
+			e := serror.NewWaiting(
+				fmt.Errorf("could not get resource for include '%s': %w", incl.GitRepositoryRef.Name, err),
+				"NotFound",
+			)
+			e.RequeueAfter = r.requeueDependency
+			conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
+			return nil, e
+		}
+
+		// Confirm include has an artifact
+		if dep.GetArtifact() == nil {
+			e := serror.NewWaiting(
+				fmt.Errorf("no artifact available for include '%s'", incl.GitRepositoryRef.Name),
+				"NoArtifact",
+			)
+			e.RequeueAfter = r.requeueDependency
+			conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
+			return nil, e
+		}
+
+		artifacts[i] = dep.GetArtifact().DeepCopy()
+	}
+
+	// We now know all the includes are available.
+	conditions.Delete(obj, sourcev1.IncludeUnavailableCondition)
+	return &artifacts, nil
+}
+
+// calculateContentConfigChecksum calculates a checksum of all the
+// configurations that result in a change in the source artifact. It can be
+// used to decide if further reconciliation is needed when an artifact already
+// exists for a set of configurations.
+func (r *GitRepositoryReconciler) calculateContentConfigChecksum(obj *sourcev1.GitRepository, includes *artifactSet) string {
+	c := []byte{}
+	// Consider the ignore rules and recurse submodules.
+	if obj.Spec.Ignore != nil {
+		c = append(c, []byte(*obj.Spec.Ignore)...)
+	}
+	c = append(c, []byte(strconv.FormatBool(obj.Spec.RecurseSubmodules))...)
+
+	// Consider the included repository attributes.
+	for _, incl := range obj.Spec.Include {
+		c = append(c, []byte(incl.GitRepositoryRef.Name+incl.FromPath+incl.ToPath)...)
+	}
+
+	// Consider the checksums of all the included remote artifacts. This ensures
+	// that if the included repos get updated, this checksum changes.
+	if includes != nil {
+		for _, incl := range *includes {
+			c = append(c, []byte(incl.Checksum)...)
+		}
+	}
+
+	return fmt.Sprintf("sha256:%x", sha256.Sum256(c))
+}

 // verifyCommitSignature verifies the signature of the given Git commit, if a
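Pulling the reconcileSource changes together: the reconciliation is skipped only when the optimized clone reports an unchanged remote revision and the content configuration checksum recorded in the status still matches. The snippet below is a self-contained toy model of that decision, for illustration only; it is not code from this commit, and `checkoutResult` and `shouldSkip` are made-up stand-ins for the real `*sourcev1.GitRepository`, `git.Commit` and `artifactSet` plumbing.

```go
package main

import "fmt"

// checkoutResult stands in for what the (optimized) git checkout reports back.
type checkoutResult struct {
	revision string
	concrete bool // false when the optimized clone short-circuited on an unchanged remote
}

// shouldSkip reports whether the rest of the reconciliation can be skipped:
// the remote revision must be unchanged AND the content configuration checksum
// (ignore rules, submodule recursion, includes) must match the one recorded in
// the object's status.
func shouldSkip(res checkoutResult, currentChecksum, observedChecksum string) bool {
	return !res.concrete && currentChecksum == observedChecksum
}

func main() {
	observed := "sha256:aaa"

	// Remote unchanged and config unchanged: skip.
	fmt.Println(shouldSkip(checkoutResult{"main/abc123", false}, "sha256:aaa", observed)) // true

	// Remote unchanged but .spec.ignore changed, so the checksum differs: full checkout.
	fmt.Println(shouldSkip(checkoutResult{"main/abc123", false}, "sha256:bbb", observed)) // false

	// Remote moved (concrete commit): always reconcile.
	fmt.Println(shouldSkip(checkoutResult{"main/def456", true}, "sha256:aaa", observed)) // false
}
```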

View File

@@ -141,10 +141,13 @@ Oomb3gD/TRf/nAdVED+k81GdLzciYdUGtI71/qI47G0nMBluLRE=
 =/4e+
 -----END PGP PUBLIC KEY BLOCK-----
 `
+	emptyContentConfigChecksum = "sha256:fcbcf165908dd18a9e49f7ff27810176db8e9f63b4352213741664245224f8aa"
 )

 var (
-	testGitImplementations = []string{sourcev1.GoGitImplementation, sourcev1.LibGit2Implementation}
+	// testGitImplementations = []string{sourcev1.GoGitImplementation, sourcev1.LibGit2Implementation}
+	// testGitImplementations = []string{sourcev1.GoGitImplementation}
+	testGitImplementations = []string{sourcev1.LibGit2Implementation}
 )

 func TestGitRepositoryReconciler_Reconcile(t *testing.T) {
@@ -638,8 +641,33 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T)
 						Revision: "staging/" + latestRev,
 						Path:     randStringRunes(10),
 					},
+					// Checksum with all the relevant fields unset.
+					ContentConfigChecksum: emptyContentConfigChecksum,
+				}
+				conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "foo")
+			},
+			want:                 sreconcile.ResultEmpty,
+			wantErr:              true,
+			wantRevision:         "staging/<commit>",
+			wantArtifactOutdated: false,
+		},
+		{
+			name: "Optimized clone different ignore",
+			reference: &sourcev1.GitRepositoryRef{
+				Branch: "staging",
+			},
+			beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) {
+				// Set new ignore value.
+				obj.Spec.Ignore = pointer.StringPtr("foo")
+				// Add existing artifact on the object and storage.
+				obj.Status = sourcev1.GitRepositoryStatus{
+					Artifact: &sourcev1.Artifact{
+						Revision: "staging/" + latestRev,
+						Path:     randStringRunes(10),
+					},
+					// Checksum with all the relevant fields unset.
+					ContentConfigChecksum: emptyContentConfigChecksum,
+				}
+				testStorage.Archive(obj.GetArtifact(), "testdata/git/repository", nil)
 				conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "foo")
 			},
 			want: sreconcile.ResultSuccess,
@@ -782,6 +810,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) {
 				obj.Spec.Interval = metav1.Duration{Duration: interval}
 				obj.Status.Artifact = &sourcev1.Artifact{Revision: "main/revision"}
 				obj.Status.IncludedArtifacts = []*sourcev1.Artifact{{Revision: "main/revision"}}
+				obj.Status.ContentConfigChecksum = "sha256:fcbcf165908dd18a9e49f7ff27810176db8e9f63b4352213741664245224f8aa"
 			},
 			afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
 				t.Expect(obj.Status.URL).To(BeEmpty())
@@ -986,39 +1015,6 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) {
 				{name: "b", toPath: "b/", shouldExist: true},
 			},
 			want: sreconcile.ResultSuccess,
-			assertConditions: []metav1.Condition{
-				*conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "IncludeChange", "included artifacts differ from last observed includes"),
-				*conditions.TrueCondition(meta.ReconcilingCondition, "IncludeChange", "included artifacts differ from last observed includes"),
-			},
-		},
-		{
-			name: "Include get failure makes IncludeUnavailable=True and returns error",
-			includes: []include{
-				{name: "a", toPath: "a/"},
-			},
-			wantErr: true,
-			assertConditions: []metav1.Condition{
-				*conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NotFound", "could not get resource for include 'a': gitrepositories.source.toolkit.fluxcd.io \"a\" not found"),
-			},
-		},
-		{
-			name: "Include without an artifact makes IncludeUnavailable=True",
-			dependencies: []dependency{
-				{
-					name:         "a",
-					withArtifact: false,
-					conditions: []metav1.Condition{
-						*conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "Foo", "foo unavailable"),
-					},
-				},
-			},
-			includes: []include{
-				{name: "a", toPath: "a/"},
-			},
-			wantErr: true,
-			assertConditions: []metav1.Condition{
-				*conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NoArtifact", "no artifact available for include 'a'"),
-			},
-		},
 		{
 			name: "Invalid FromPath makes IncludeUnavailable=True and returns error",
@@ -1033,17 +1029,9 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) {
 			},
 			wantErr: true,
 			assertConditions: []metav1.Condition{
-				*conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "CopyFailure", "unpack/path: no such file or directory"),
+				*conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, "CopyFailure", "unpack/path: no such file or directory"),
 			},
 		},
-		{
-			name: "Outdated IncludeUnavailable is removed",
-			beforeFunc: func(obj *sourcev1.GitRepository) {
-				conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", "")
-			},
-			want:             sreconcile.ResultSuccess,
-			assertConditions: []metav1.Condition{},
-		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -1112,6 +1100,11 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) {
 			var commit git.Commit
 			var includes artifactSet

+			// Build includes artifactSet.
+			artifactSet, err := r.fetchIncludes(ctx, obj)
+			g.Expect(err).ToNot(HaveOccurred())
+			includes = *artifactSet
+
 			got, err := r.reconcileInclude(ctx, obj, &commit, &includes, tmpDir)
 			g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions))
 			g.Expect(err != nil).To(Equal(tt.wantErr))
@@ -1921,3 +1914,196 @@ func TestGitRepositoryReconciler_notify(t *testing.T) {
 		})
 	}
 }
+
+func TestGitRepositoryReconciler_fetchIncludes(t *testing.T) {
+	type dependency struct {
+		name         string
+		withArtifact bool
+		conditions   []metav1.Condition
+	}
+
+	type include struct {
+		name        string
+		fromPath    string
+		toPath      string
+		shouldExist bool
+	}
+
+	tests := []struct {
+		name             string
+		dependencies     []dependency
+		includes         []include
+		beforeFunc       func(obj *sourcev1.GitRepository)
+		wantErr          bool
+		wantArtifactSet  artifactSet
+		assertConditions []metav1.Condition
+	}{
+		{
+			name: "Existing includes",
+			dependencies: []dependency{
+				{
+					name:         "a",
+					withArtifact: true,
+					conditions: []metav1.Condition{
+						*conditions.TrueCondition(meta.ReadyCondition, "Foo", "foo ready"),
+					},
+				},
+				{
+					name:         "b",
+					withArtifact: true,
+					conditions: []metav1.Condition{
+						*conditions.TrueCondition(meta.ReadyCondition, "Bar", "bar ready"),
+					},
+				},
+			},
+			includes: []include{
+				{name: "a", toPath: "a/", shouldExist: true},
+				{name: "b", toPath: "b/", shouldExist: true},
+			},
+			wantErr: false,
+			wantArtifactSet: []*sourcev1.Artifact{
+				{Revision: "a"},
+				{Revision: "b"},
+			},
+		},
+		{
+			name: "Include get failure",
+			includes: []include{
+				{name: "a", toPath: "a/"},
+			},
+			wantErr: true,
+			assertConditions: []metav1.Condition{
+				*conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NotFound", "could not get resource for include 'a': gitrepositories.source.toolkit.fluxcd.io \"a\" not found"),
+			},
+		},
+		{
+			name: "Include without an artifact makes IncludeUnavailable=True",
+			dependencies: []dependency{
+				{
+					name:         "a",
+					withArtifact: false,
+					conditions: []metav1.Condition{
+						*conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "Foo", "foo unavailable"),
+					},
+				},
+			},
+			includes: []include{
+				{name: "a", toPath: "a/"},
+			},
+			wantErr: true,
+			assertConditions: []metav1.Condition{
+				*conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NoArtifact", "no artifact available for include 'a'"),
+			},
+		},
+		{
+			name: "Outdated IncludeUnavailable is removed",
+			beforeFunc: func(obj *sourcev1.GitRepository) {
+				conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", "")
+			},
+			assertConditions: []metav1.Condition{},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			g := NewWithT(t)
+
+			var depObjs []client.Object
+			for _, d := range tt.dependencies {
+				obj := &sourcev1.GitRepository{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: d.name,
+					},
+					Status: sourcev1.GitRepositoryStatus{
+						Conditions: d.conditions,
+					},
+				}
+				if d.withArtifact {
+					obj.Status.Artifact = &sourcev1.Artifact{
+						Path:           d.name + ".tar.gz",
+						Revision:       d.name,
+						LastUpdateTime: metav1.Now(),
+					}
+				}
+				depObjs = append(depObjs, obj)
+			}
+
+			builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme())
+			if len(tt.dependencies) > 0 {
+				builder.WithObjects(depObjs...)
+			}
+
+			r := &GitRepositoryReconciler{
+				Client:        builder.Build(),
+				EventRecorder: record.NewFakeRecorder(32),
+			}
+
+			obj := &sourcev1.GitRepository{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "reconcile-include",
+				},
+				Spec: sourcev1.GitRepositorySpec{
+					Interval: metav1.Duration{Duration: interval},
+				},
+			}
+
+			for i, incl := range tt.includes {
+				incl := sourcev1.GitRepositoryInclude{
+					GitRepositoryRef: meta.LocalObjectReference{Name: incl.name},
+					FromPath:         incl.fromPath,
+					ToPath:           incl.toPath,
+				}
+				tt.includes[i].fromPath = incl.GetFromPath()
+				tt.includes[i].toPath = incl.GetToPath()
+				obj.Spec.Include = append(obj.Spec.Include, incl)
+			}
+
+			gotArtifactSet, err := r.fetchIncludes(ctx, obj)
+			g.Expect(err != nil).To(Equal(tt.wantErr))
+			g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions))
+			if !tt.wantErr && gotArtifactSet != nil {
+				g.Expect(gotArtifactSet.Diff(tt.wantArtifactSet)).To(BeFalse())
+			}
+		})
+	}
+}
+
+func TestGitRepositoryReconciler_calculateContentConfigChecksum(t *testing.T) {
+	g := NewWithT(t)
+	obj := &sourcev1.GitRepository{}
+	r := &GitRepositoryReconciler{}
+
+	emptyChecksum := r.calculateContentConfigChecksum(obj, nil)
+	g.Expect(emptyChecksum).To(Equal(emptyContentConfigChecksum))
+
+	// Ignore modified.
+	obj.Spec.Ignore = pointer.String("some-rule")
+	ignoreModChecksum := r.calculateContentConfigChecksum(obj, nil)
+	g.Expect(emptyChecksum).ToNot(Equal(ignoreModChecksum))
+
+	// Recurse submodules modified.
+	obj.Spec.RecurseSubmodules = true
+	submodModChecksum := r.calculateContentConfigChecksum(obj, nil)
+	g.Expect(ignoreModChecksum).ToNot(Equal(submodModChecksum))
+
+	// Include modified.
+	obj.Spec.Include = []sourcev1.GitRepositoryInclude{
+		{
+			GitRepositoryRef: meta.LocalObjectReference{Name: "foo"},
+			FromPath:         "aaa",
+			ToPath:           "bbb",
+		},
+	}
+	artifacts := &artifactSet{
+		&sourcev1.Artifact{Checksum: "some-checksum-1"},
+	}
+	includeModChecksum := r.calculateContentConfigChecksum(obj, artifacts)
+	g.Expect(submodModChecksum).ToNot(Equal(includeModChecksum))
+
+	// Artifact modified.
+	artifacts = &artifactSet{
+		&sourcev1.Artifact{Checksum: "some-checksum-2"},
+	}
+	artifactModChecksum := r.calculateContentConfigChecksum(obj, artifacts)
+	g.Expect(includeModChecksum).ToNot(Equal(artifactModChecksum))
+}
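For reference, the emptyContentConfigChecksum constant used throughout these tests is simply the checksum of a GitRepository with no ignore rules, no submodule recursion and no includes; under the algorithm added in this commit the only hashed input is strconv.FormatBool(false). A standalone check (assuming that algorithm) reproduces the value:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"strconv"
)

func main() {
	// With .spec.ignore unset, .spec.recurseSubmodules=false and no includes,
	// calculateContentConfigChecksum hashes nothing but strconv.FormatBool(false).
	c := []byte(strconv.FormatBool(false))
	fmt.Printf("sha256:%x\n", sha256.Sum256(c))
	// Expected output (the emptyContentConfigChecksum constant above):
	// sha256:fcbcf165908dd18a9e49f7ff27810176db8e9f63b4352213741664245224f8aa
}
```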

View File

@@ -1643,6 +1643,25 @@ Artifacts as instructed by GitRepositorySpec.Include.</p>
 </tr>
 <tr>
 <td>
+<code>contentConfigChecksum</code><br>
+<em>
+string
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>ContentConfigChecksum is a checksum of all the configurations related to
+the content of the source artifact:
+- .spec.ignore
+- .spec.recurseSubmodules
+- .spec.included and the checksum of the included artifacts
+observed in .status.observedGeneration version of the object. This can
+be used to determine if the content of the included repository has
+changed.
+It has the format of <code>&lt;algo&gt;:&lt;checksum&gt;</code>, for example: <code>sha256:&lt;checksum&gt;</code>.</p>
+</td>
+</tr>
+<tr>
+<td>
 <code>ReconcileRequestStatus</code><br>
 <em>
 <a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#ReconcileRequestStatus">

View File

@@ -34,6 +34,8 @@ import (
 	"github.com/fluxcd/source-controller/pkg/git/libgit2/managed"
 )

+const defaultRemoteName = "origin"
+
 // CheckoutStrategyForOptions returns the git.CheckoutStrategy for the given
 // git.CheckoutOptions.
 func CheckoutStrategyForOptions(ctx context.Context, opt git.CheckoutOptions) git.CheckoutStrategy {
@@ -67,14 +69,28 @@ type CheckoutBranch struct {
 func (c *CheckoutBranch) Checkout(ctx context.Context, path, url string, opts *git.AuthOptions) (_ *git.Commit, err error) {
 	defer recoverPanic(&err)

-	repo, remote, free, err := getBlankRepoAndRemote(ctx, path, url, opts)
+	remoteCallBacks := RemoteCallbacks(ctx, opts)
+	proxyOpts := &git2go.ProxyOptions{Type: git2go.ProxyTypeAuto}
+
+	repo, remote, err := initializeRepoWithRemote(ctx, path, url, opts)
 	if err != nil {
 		return nil, err
 	}
-	defer free()
+	// Open remote connection.
+	err = remote.ConnectFetch(&remoteCallBacks, proxyOpts, nil)
+	if err != nil {
+		remote.Free()
+		repo.Free()
+		return nil, fmt.Errorf("unable to fetch-connect to remote '%s': %w", managed.EffectiveURL(url), gitutil.LibGit2Error(err))
+	}
+	defer func() {
+		remote.Disconnect()
+		remote.Free()
+		repo.Free()
+	}()

-	// When the last observed revision is set, check whether it is still
-	// the same at the remote branch. If so, short-circuit the clone operation here.
+	// When the last observed revision is set, check whether it is still the
+	// same at the remote branch. If so, short-circuit the clone operation here.
 	if c.LastRevision != "" {
 		heads, err := remote.Ls(c.Branch)
 		if err != nil {
@@ -98,7 +114,7 @@ func (c *CheckoutBranch) Checkout(ctx context.Context, path, url string, opts *g
 	err = remote.Fetch([]string{c.Branch},
 		&git2go.FetchOptions{
 			DownloadTags:    git2go.DownloadTagsNone,
-			RemoteCallbacks: RemoteCallbacks(ctx, opts),
+			RemoteCallbacks: remoteCallBacks,
 			ProxyOptions:    git2go.ProxyOptions{Type: git2go.ProxyTypeAuto},
 		},
 		"")
@@ -154,12 +170,28 @@ type CheckoutTag struct {
 func (c *CheckoutTag) Checkout(ctx context.Context, path, url string, opts *git.AuthOptions) (_ *git.Commit, err error) {
 	defer recoverPanic(&err)

-	repo, remote, free, err := getBlankRepoAndRemote(ctx, path, url, opts)
+	remoteCallBacks := RemoteCallbacks(ctx, opts)
+	proxyOpts := &git2go.ProxyOptions{Type: git2go.ProxyTypeAuto}
+
+	repo, remote, err := initializeRepoWithRemote(ctx, path, url, opts)
 	if err != nil {
 		return nil, err
 	}
-	defer free()
+	// Open remote connection.
+	err = remote.ConnectFetch(&remoteCallBacks, proxyOpts, nil)
+	if err != nil {
+		remote.Free()
+		repo.Free()
+		return nil, fmt.Errorf("unable to fetch-connect to remote '%s': %w", managed.EffectiveURL(url), gitutil.LibGit2Error(err))
+	}
+	defer func() {
+		remote.Disconnect()
+		remote.Free()
+		repo.Free()
+	}()
+
+	// When the last observed revision is set, check whether it is still the
+	// same at the remote branch. If so, short-circuit the clone operation here.
 	if c.LastRevision != "" {
 		heads, err := remote.Ls(c.Tag)
 		if err != nil {
@@ -192,8 +224,8 @@ func (c *CheckoutTag) Checkout(ctx context.Context, path, url string, opts *git.
 	err = remote.Fetch([]string{c.Tag},
 		&git2go.FetchOptions{
 			DownloadTags:    git2go.DownloadTagsAuto,
-			RemoteCallbacks: RemoteCallbacks(ctx, opts),
-			ProxyOptions:    git2go.ProxyOptions{Type: git2go.ProxyTypeAuto},
+			RemoteCallbacks: remoteCallBacks,
+			ProxyOptions:    *proxyOpts,
 		},
 		"")
@@ -415,34 +447,34 @@ func buildSignature(s *git2go.Signature) git.Signature {
 	}
 }

-// getBlankRepoAndRemote returns a newly initialized repository, and a remote connected to the provided url.
-// Callers must call the returning function to free all git2go objects.
-func getBlankRepoAndRemote(ctx context.Context, path, url string, opts *git.AuthOptions) (*git2go.Repository, *git2go.Remote, func(), error) {
+// initializeRepoWithRemote initializes or opens a repository at the given path
+// and configures it with the given remote "origin" URL. If a remote already
+// exists with a different URL, it returns an error.
+func initializeRepoWithRemote(ctx context.Context, path, url string, opts *git.AuthOptions) (*git2go.Repository, *git2go.Remote, error) {
 	repo, err := git2go.InitRepository(path, false)
 	if err != nil {
-		return nil, nil, nil, fmt.Errorf("unable to init repository for '%s': %w", managed.EffectiveURL(url), gitutil.LibGit2Error(err))
+		return nil, nil, fmt.Errorf("unable to init repository for '%s': %w", managed.EffectiveURL(url), gitutil.LibGit2Error(err))
 	}

-	remote, err := repo.Remotes.Create("origin", url)
+	remote, err := repo.Remotes.Create(defaultRemoteName, url)
 	if err != nil {
-		repo.Free()
-		return nil, nil, nil, fmt.Errorf("unable to create remote for '%s': %w", managed.EffectiveURL(url), gitutil.LibGit2Error(err))
+		// If the remote already exists, lookup the remote.
+		if git2go.IsErrorCode(err, git2go.ErrorCodeExists) {
+			remote, err = repo.Remotes.Lookup(defaultRemoteName)
+			if err != nil {
+				repo.Free()
+				return nil, nil, fmt.Errorf("unable to create or lookup remote '%s'", defaultRemoteName)
+			}
+			if remote.Url() != url {
+				repo.Free()
+				return nil, nil, fmt.Errorf("remote '%s' with different address '%s' already exists", defaultRemoteName, remote.Url())
+			}
+		} else {
+			repo.Free()
+			return nil, nil, fmt.Errorf("unable to create remote for '%s': %w", managed.EffectiveURL(url), gitutil.LibGit2Error(err))
+		}
 	}
-
-	callBacks := RemoteCallbacks(ctx, opts)
-	err = remote.ConnectFetch(&callBacks, &git2go.ProxyOptions{Type: git2go.ProxyTypeAuto}, nil)
-	if err != nil {
-		remote.Free()
-		repo.Free()
-		return nil, nil, nil, fmt.Errorf("unable to fetch-connect to remote '%s': %w", managed.EffectiveURL(url), gitutil.LibGit2Error(err))
-	}
-
-	free := func() {
-		remote.Disconnect()
-		remote.Free()
-		repo.Free()
-	}
-	return repo, remote, free, nil
+	return repo, remote, nil
 }

 func recoverPanic(err *error) {
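Both checkout strategies now follow the same pattern: initialize (or reuse) the origin remote, open a single fetch connection, use it first for Ls to compare the advertised tip against LastRevision, and only then Fetch. The sketch below illustrates that connect-once pattern with git2go; it is not code from this commit, assumes git2go/v33 with anonymous HTTPS access, uses a placeholder branch, repository URL and last revision, and omits the credential callbacks and error wrapping used above.

```go
package main

import (
	"fmt"
	"os"

	git2go "github.com/libgit2/git2go/v33"
)

func main() {
	dir, err := os.MkdirTemp("", "clone-sketch")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// Initialize a non-bare repository and point "origin" at the remote,
	// mirroring initializeRepoWithRemote above (without the reuse/lookup path).
	repo, err := git2go.InitRepository(dir, false)
	if err != nil {
		panic(err)
	}
	defer repo.Free()

	remote, err := repo.Remotes.Create("origin", "https://github.com/fluxcd/source-controller")
	if err != nil {
		panic(err)
	}
	defer remote.Free()

	// Connect once in fetch direction; the same connection serves both the
	// reference listing and the subsequent fetch.
	callbacks := git2go.RemoteCallbacks{}
	proxyOpts := git2go.ProxyOptions{Type: git2go.ProxyTypeAuto}
	if err := remote.ConnectFetch(&callbacks, &proxyOpts, nil); err != nil {
		panic(err)
	}
	defer remote.Disconnect()

	// Compare the advertised tip of the branch against the last observed
	// revision before paying for a fetch.
	lastRevision := "main/0000000000000000000000000000000000000000" // stand-in for the artifact revision
	heads, err := remote.Ls("main")
	if err != nil {
		panic(err)
	}
	if len(heads) > 0 && fmt.Sprintf("main/%s", heads[0].Id.String()) == lastRevision {
		fmt.Println("remote tip unchanged, short-circuiting the clone")
		return
	}

	if err := remote.Fetch([]string{"main"}, &git2go.FetchOptions{
		DownloadTags:    git2go.DownloadTagsNone,
		RemoteCallbacks: callbacks,
		ProxyOptions:    proxyOpts,
	}, ""); err != nil {
		panic(err)
	}
	fmt.Println("fetched branch main")
}
```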

View File

@@ -580,3 +580,37 @@ func TestCheckout_ED25519(t *testing.T) {
 	_, err = branchCheckoutStrat.Checkout(ctx, tmpDir, repoURL, authOpts)
 	g.Expect(err).ToNot(HaveOccurred())
 }
+
+func TestInitializeRepoWithRemote(t *testing.T) {
+	g := NewWithT(t)
+	tmp := t.TempDir()
+	ctx := context.TODO()
+	testRepoURL := "https://example.com/foo/bar"
+	testRepoURL2 := "https://example.com/foo/baz"
+	authOpts, err := git.AuthOptionsWithoutSecret(testRepoURL)
+	g.Expect(err).ToNot(HaveOccurred())
+	authOpts2, err := git.AuthOptionsWithoutSecret(testRepoURL2)
+	g.Expect(err).ToNot(HaveOccurred())
+
+	// Fresh initialization.
+	repo, remote, err := initializeRepoWithRemote(ctx, tmp, testRepoURL, authOpts)
+	g.Expect(err).ToNot(HaveOccurred())
+	g.Expect(repo.IsBare()).To(BeFalse())
+	g.Expect(remote.Name()).To(Equal(defaultRemoteName))
+	g.Expect(remote.Url()).To(Equal(testRepoURL))
+	remote.Free()
+	repo.Free()
+
+	// Reinitialize to ensure it reuses the existing origin.
+	repo, remote, err = initializeRepoWithRemote(ctx, tmp, testRepoURL, authOpts)
+	g.Expect(err).ToNot(HaveOccurred())
+	g.Expect(repo.IsBare()).To(BeFalse())
+	g.Expect(remote.Name()).To(Equal(defaultRemoteName))
+	g.Expect(remote.Url()).To(Equal(testRepoURL))
+	remote.Free()
+	repo.Free()
+
+	// Reinitialize with a different remote URL for existing origin.
+	_, _, err = initializeRepoWithRemote(ctx, tmp, testRepoURL2, authOpts2)
+	g.Expect(err).To(HaveOccurred())
+}