Make exechooks work like webhooks.

This deprecates the `--sync-hook-command` flag in favor of
`--exechook-command`, `--exechook-timeout`, and `--exechook-backoff`.
Tim Hockin 2021-08-16 22:14:10 -07:00
parent 766682c5fa
commit 0075df238c
13 changed files with 852 additions and 405 deletions
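For illustration only (not part of this commit; the repo URL and hook script path below are placeholders), a deployment migrating off the deprecated flag would change its invocation roughly like this. The timeout and backoff values shown are the new defaults:

# Before (deprecated):
git-sync --repo=https://example.com/app-config.git --root=/tmp/git \
    --sync-hook-command=/scripts/post-sync.sh

# After:
git-sync --repo=https://example.com/app-config.git --root=/tmp/git \
    --exechook-command=/scripts/post-sync.sh \
    --exechook-timeout=30s \
    --exechook-backoff=3s

If --sync-hook-command is still set, its value is copied into --exechook-command, so existing setups keep working.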

cmd/git-sync/main.go

@ -19,9 +19,7 @@ limitations under the License.
package main // import "k8s.io/git-sync/cmd/git-sync"
import (
- "bytes"
"context"
- "encoding/json"
stdflag "flag" // renamed so we don't accidentally use it
"fmt"
"io"
@ -38,11 +36,12 @@ import (
"sync"
"time"
- "github.com/go-logr/glogr"
- "github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/spf13/pflag"
+ "k8s.io/git-sync/pkg/cmd"
+ "k8s.io/git-sync/pkg/hook"
+ "k8s.io/git-sync/pkg/logging"
"k8s.io/git-sync/pkg/pid1"
"k8s.io/git-sync/pkg/version"
)
@ -82,11 +81,16 @@ var flMaxSyncFailures = pflag.Int("max-sync-failures", envInt("GIT_SYNC_MAX_SYNC
var flChmod = pflag.Int("change-permissions", envInt("GIT_SYNC_PERMISSIONS", 0),
"optionally change permissions on the checked-out files to the specified mode")
- var flSyncHookCommand = pflag.String("sync-hook-command", envString("GIT_SYNC_HOOK_COMMAND", ""),
- "an optional command to be executed after syncing a new hash of the remote repository")
var flSparseCheckoutFile = pflag.String("sparse-checkout-file", envString("GIT_SYNC_SPARSE_CHECKOUT_FILE", ""),
"the path to a sparse-checkout file")
+ var flExechookCommand = pflag.String("exechook-command", envString("GIT_EXECHOOK_COMMAND", ""),
+ "an optional command to be run when syncs complete")
+ var flExechookTimeout = pflag.Duration("exechook-timeout", envDuration("GIT_EXECHOOK_TIMEOUT", time.Second*30),
+ "the timeout for the exechook")
+ var flExechookBackoff = pflag.Duration("exechook-backoff", envDuration("GIT_EXECHOOK_BACKOFF", time.Second*3),
+ "the time to wait before retrying a failed exechook")
var flWebhookURL = pflag.String("webhook-url", envString("GIT_SYNC_WEBHOOK_URL", ""),
"a URL for optional webhook notifications when syncs complete")
var flWebhookMethod = pflag.String("webhook-method", envString("GIT_SYNC_WEBHOOK_METHOD", "POST"),
@ -141,14 +145,18 @@ var flTimeout = pflag.Int("timeout", envInt("GIT_SYNC_TIMEOUT", 0),
"DEPRECATED: use --sync-timeout instead")
var flDest = pflag.String("dest", envString("GIT_SYNC_DEST", ""),
"DEPRECATED: use --link instead")
+ var flSyncHookCommand = pflag.String("sync-hook-command", envString("GIT_SYNC_HOOK_COMMAND", ""),
+ "DEPRECATED: use --exechook-command instead")
func init() {
pflag.CommandLine.MarkDeprecated("wait", "use --period instead")
pflag.CommandLine.MarkDeprecated("timeout", "use --sync-timeout instead")
pflag.CommandLine.MarkDeprecated("dest", "use --link instead")
+ pflag.CommandLine.MarkDeprecated("sync-hook-command", "use --exechook-command instead")
}
- var log *customLogger
+ var cmdRunner *cmd.Runner
+ var log *logging.Logger
// Total pull/error, summary on pull duration
var (
@ -183,103 +191,6 @@ const (
submodulesOff submodulesMode = "off"
)
type customLogger struct {
logr.Logger
root string
errorFile string
}
func (l customLogger) Error(err error, msg string, kvList ...interface{}) {
l.Logger.Error(err, msg, kvList...)
if l.errorFile == "" {
return
}
payload := struct {
Msg string
Err string
Args map[string]interface{}
}{
Msg: msg,
Err: err.Error(),
Args: map[string]interface{}{},
}
if len(kvList)%2 != 0 {
kvList = append(kvList, "<no-value>")
}
for i := 0; i < len(kvList); i += 2 {
k, ok := kvList[i].(string)
if !ok {
k = fmt.Sprintf("%v", kvList[i])
}
payload.Args[k] = kvList[i+1]
}
jb, err := json.Marshal(payload)
if err != nil {
l.Logger.Error(err, "can't encode error payload")
content := fmt.Sprintf("%v", err)
l.writeContent([]byte(content))
} else {
l.writeContent(jb)
}
}
// exportError exports the error to the error file if --export-error is enabled.
func (l *customLogger) exportError(content string) {
if l.errorFile == "" {
return
}
l.writeContent([]byte(content))
}
// writeContent writes the error content to the error file.
func (l *customLogger) writeContent(content []byte) {
if _, err := os.Stat(l.root); os.IsNotExist(err) {
fileMode := os.FileMode(0755)
if err := os.Mkdir(l.root, fileMode); err != nil {
l.Logger.Error(err, "can't create the root directory", "root", l.root)
return
}
}
tmpFile, err := ioutil.TempFile(l.root, "tmp-err-")
if err != nil {
l.Logger.Error(err, "can't create temporary error-file", "directory", l.root, "prefix", "tmp-err-")
return
}
defer func() {
if err := tmpFile.Close(); err != nil {
l.Logger.Error(err, "can't close temporary error-file", "filename", tmpFile.Name())
}
}()
if _, err = tmpFile.Write(content); err != nil {
l.Logger.Error(err, "can't write to temporary error-file", "filename", tmpFile.Name())
return
}
errorFile := filepath.Join(l.root, l.errorFile)
if err := os.Rename(tmpFile.Name(), errorFile); err != nil {
l.Logger.Error(err, "can't rename to error-file", "temp-file", tmpFile.Name(), "error-file", errorFile)
return
}
if err := os.Chmod(errorFile, 0644); err != nil {
l.Logger.Error(err, "can't change permissions on the error-file", "error-file", errorFile)
}
}
// deleteErrorFile deletes the error file.
func (l *customLogger) deleteErrorFile() {
if l.errorFile == "" {
return
}
errorFile := filepath.Join(l.root, l.errorFile)
if err := os.Remove(errorFile); err != nil {
if os.IsNotExist(err) {
return
}
l.Logger.Error(err, "can't delete the error-file", "filename", errorFile)
}
}
func init() {
prometheus.MustRegister(syncDuration)
prometheus.MustRegister(syncCount)
@ -394,7 +305,8 @@ func main() {
stdflag.CommandLine.Parse(nil) // Otherwise glog complains
setGlogFlags()
- log = &customLogger{glogr.New(), *flRoot, *flErrorFile}
+ log = logging.New(*flRoot, *flErrorFile)
+ cmdRunner = cmd.NewRunner(log)
if *flVersion {
fmt.Println(version.VERSION)
@ -456,6 +368,18 @@ func main() {
handleError(true, "ERROR: --sync-timeout must be at least 10ms")
}
+ if *flSyncHookCommand != "" {
+ *flExechookCommand = *flSyncHookCommand
+ }
+ if *flExechookCommand != "" {
+ if *flExechookTimeout < time.Second {
+ handleError(true, "ERROR: --exechook-timeout must be at least 1s")
+ }
+ if *flExechookBackoff < time.Second {
+ handleError(true, "ERROR: --exechook-backoff must be at least 1s")
+ }
+ }
if *flWebhookURL != "" {
if *flWebhookStatusSuccess < -1 {
handleError(true, "ERROR: --webhook-success-status must be a valid HTTP code or -1")
@ -635,17 +559,42 @@ func main() {
}
// Startup webhooks goroutine
- var webhook *Webhook
+ var webhookRunner *hook.HookRunner
if *flWebhookURL != "" {
- webhook = &Webhook{
- URL: *flWebhookURL,
- Method: *flWebhookMethod,
- Success: *flWebhookStatusSuccess,
- Timeout: *flWebhookTimeout,
- Backoff: *flWebhookBackoff,
- Data: NewWebhookData(),
- }
- go webhook.run()
+ webhook := hook.NewWebhook(
+ *flWebhookURL,
+ *flWebhookMethod,
+ *flWebhookStatusSuccess,
+ *flWebhookTimeout,
+ log,
+ )
+ webhookRunner = hook.NewHookRunner(
+ webhook,
+ *flWebhookBackoff,
+ hook.NewHookData(),
+ log,
+ )
+ go webhookRunner.Run(context.Background())
}
+ // Startup exechooks goroutine
+ var exechookRunner *hook.HookRunner
+ if *flExechookCommand != "" {
+ exechook := hook.NewExechook(
+ cmd.NewRunner(log),
+ *flExechookCommand,
+ *flRoot,
+ []string{},
+ *flExechookTimeout,
+ log,
+ )
+ exechookRunner = hook.NewHookRunner(
+ exechook,
+ *flExechookBackoff,
+ hook.NewHookData(),
+ log,
+ )
+ go exechookRunner.Run(context.Background())
+ }
initialSync := true
@ -676,9 +625,13 @@ func main() {
time.Sleep(*flPeriod)
continue
} else if changed {
- if webhook != nil {
- webhook.Send(hash)
+ if webhookRunner != nil {
+ webhookRunner.Send(hash)
}
+ if exechookRunner != nil {
+ exechookRunner.Send(hash)
+ }
updateSyncMetrics(metricKeySuccess, start)
} else {
updateSyncMetrics(metricKeyNoOp, start)
@ -686,7 +639,7 @@ func main() {
if initialSync {
if *flOneTime {
- log.deleteErrorFile()
+ log.DeleteErrorFile()
os.Exit(0)
}
if isHash, err := git.RevIsHash(ctx); err != nil {
@ -694,14 +647,14 @@ func main() {
os.Exit(1)
} else if isHash {
log.V(0).Info("rev appears to be a git hash, no further sync needed", "rev", git.rev)
- log.deleteErrorFile()
+ log.DeleteErrorFile()
sleepForever()
}
initialSync = false
}
failCount = 0
- log.deleteErrorFile()
+ log.DeleteErrorFile()
log.V(1).Info("next sync", "waitTime", flPeriod.String())
cancel()
time.Sleep(*flPeriod)
@ -761,7 +714,7 @@ func (git *repoSync) SanityCheck(ctx context.Context) bool {
}
// Check that this is actually the root of the repo.
- if root, err := runCommand(ctx, git.root, git.cmd, "rev-parse", "--show-toplevel"); err != nil {
+ if root, err := cmdRunner.Run(ctx, git.root, git.cmd, "rev-parse", "--show-toplevel"); err != nil {
log.Error(err, "can't get repo toplevel", "repo", git.root)
return false
} else {
@ -773,7 +726,7 @@ func (git *repoSync) SanityCheck(ctx context.Context) bool {
}
// Consistency-check the repo.
- if _, err := runCommand(ctx, git.root, git.cmd, "fsck", "--no-progress", "--connectivity-only"); err != nil {
+ if _, err := cmdRunner.Run(ctx, git.root, git.cmd, "fsck", "--no-progress", "--connectivity-only"); err != nil {
log.Error(err, "repo sanity check failed", "repo", git.root)
return false
}
@ -828,7 +781,7 @@ func handleError(printUsage bool, format string, a ...interface{}) {
if printUsage {
pflag.Usage()
}
- log.exportError(s)
+ log.ExportError(s)
os.Exit(1)
}
@ -875,12 +828,12 @@ func (git *repoSync) UpdateSymlink(ctx context.Context, newDir string) (string,
const tmplink = "tmp-link"
log.V(1).Info("creating tmp symlink", "root", git.root, "dst", newDirRelative, "src", tmplink)
- if _, err := runCommand(ctx, git.root, "ln", "-snf", newDirRelative, tmplink); err != nil {
+ if _, err := cmdRunner.Run(ctx, git.root, "ln", "-snf", newDirRelative, tmplink); err != nil {
return "", fmt.Errorf("error creating symlink: %v", err)
}
log.V(1).Info("renaming symlink", "root", git.root, "oldName", tmplink, "newName", git.link)
- if _, err := runCommand(ctx, git.root, "mv", "-T", tmplink, git.link); err != nil {
+ if _, err := cmdRunner.Run(ctx, git.root, "mv", "-T", tmplink, git.link); err != nil {
return "", fmt.Errorf("error replacing symlink: %v", err)
}
@ -909,7 +862,7 @@ func cleanupWorkTree(ctx context.Context, gitRoot, worktree string) error {
log.V(1).Info("removing worktree", "path", worktree)
if err := os.RemoveAll(worktree); err != nil {
return fmt.Errorf("error removing directory: %v", err)
- } else if _, err := runCommand(ctx, gitRoot, *flGitCmd, "worktree", "prune"); err != nil {
+ } else if _, err := cmdRunner.Run(ctx, gitRoot, *flGitCmd, "worktree", "prune"); err != nil {
return err
}
return nil
@ -926,7 +879,7 @@ func (git *repoSync) AddWorktreeAndSwap(ctx context.Context, hash string) error
args = append(args, "origin", git.branch)
// Update from the remote.
- if _, err := runCommand(ctx, git.root, git.cmd, args...); err != nil {
+ if _, err := cmdRunner.Run(ctx, git.root, git.cmd, args...); err != nil {
return err
}
@ -939,7 +892,7 @@ func (git *repoSync) AddWorktreeAndSwap(ctx context.Context, hash string) error
}
// GC clone
- if _, err := runCommand(ctx, git.root, git.cmd, "gc", "--prune=all"); err != nil {
+ if _, err := cmdRunner.Run(ctx, git.root, git.cmd, "gc", "--prune=all"); err != nil {
return err
}
@ -954,7 +907,7 @@ func (git *repoSync) AddWorktreeAndSwap(ctx context.Context, hash string) error
return err
}
- _, err := runCommand(ctx, git.root, git.cmd, "worktree", "add", worktreePath, "origin/"+git.branch, "--no-checkout")
+ _, err := cmdRunner.Run(ctx, git.root, git.cmd, "worktree", "add", worktreePath, "origin/"+git.branch, "--no-checkout")
log.V(0).Info("adding worktree", "path", worktreePath, "branch", fmt.Sprintf("origin/%s", git.branch))
if err != nil {
return err
@ -1009,14 +962,14 @@ func (git *repoSync) AddWorktreeAndSwap(ctx context.Context, hash string) error
}
args := []string{"sparse-checkout", "init"}
- _, err = runCommand(ctx, worktreePath, git.cmd, args...)
+ _, err = cmdRunner.Run(ctx, worktreePath, git.cmd, args...)
if err != nil {
return err
}
}
// Reset the worktree's working copy to the specific rev.
- _, err = runCommand(ctx, worktreePath, git.cmd, "reset", "--hard", hash, "--")
+ _, err = cmdRunner.Run(ctx, worktreePath, git.cmd, "reset", "--hard", hash, "--")
if err != nil {
return err
}
@ -1033,7 +986,7 @@ func (git *repoSync) AddWorktreeAndSwap(ctx context.Context, hash string) error
if git.depth != 0 {
submodulesArgs = append(submodulesArgs, "--depth", strconv.Itoa(git.depth))
}
- _, err = runCommand(ctx, worktreePath, git.cmd, submodulesArgs...)
+ _, err = cmdRunner.Run(ctx, worktreePath, git.cmd, submodulesArgs...)
if err != nil {
return err
}
@ -1043,14 +996,14 @@ func (git *repoSync) AddWorktreeAndSwap(ctx context.Context, hash string) error
if git.chmod != 0 {
mode := fmt.Sprintf("%#o", git.chmod)
log.V(0).Info("changing file permissions", "mode", mode)
- _, err = runCommand(ctx, "", "chmod", "-R", mode, worktreePath)
+ _, err = cmdRunner.Run(ctx, "", "chmod", "-R", mode, worktreePath)
if err != nil {
return err
}
}
// Reset the root's rev (so we can prune and so we can rely on it later).
- _, err = runCommand(ctx, git.root, git.cmd, "reset", "--hard", hash, "--")
+ _, err = cmdRunner.Run(ctx, git.root, git.cmd, "reset", "--hard", hash, "--")
if err != nil {
return err
}
@ -1065,17 +1018,6 @@ func (git *repoSync) AddWorktreeAndSwap(ctx context.Context, hash string) error
// From here on we have to save errors until the end.
- // Execute the hook command, if requested.
- var execErr error
- if git.syncHookCmd != "" {
- log.V(0).Info("executing command for git sync hooks", "command", git.syncHookCmd)
- // TODO: move this to be async like webhook?
- if _, err := runCommand(ctx, worktreePath, git.syncHookCmd); err != nil {
- // Save it until after cleanup runs.
- execErr = err
- }
- }
// Clean up previous worktrees.
var cleanupErr error
if oldWorktree != "" {
@ -1085,9 +1027,6 @@ func (git *repoSync) AddWorktreeAndSwap(ctx context.Context, hash string) error
if cleanupErr != nil {
return cleanupErr
}
- if execErr != nil {
- return execErr
- }
return nil
}
@ -1100,7 +1039,7 @@ func (git *repoSync) CloneRepo(ctx context.Context) error {
args = append(args, git.repo, git.root)
log.V(0).Info("cloning repo", "origin", git.repo, "path", git.root)
- _, err := runCommand(ctx, "", git.cmd, args...)
+ _, err := cmdRunner.Run(ctx, "", git.cmd, args...)
if err != nil {
if strings.Contains(err.Error(), "already exists and is not an empty directory") {
// Maybe a previous run crashed? Git won't use this dir.
@ -1109,7 +1048,7 @@ func (git *repoSync) CloneRepo(ctx context.Context) error {
if err != nil {
return err
}
- _, err = runCommand(ctx, "", git.cmd, args...)
+ _, err = cmdRunner.Run(ctx, "", git.cmd, args...)
if err != nil {
return err
}
@ -1154,7 +1093,7 @@ func (git *repoSync) CloneRepo(ctx context.Context) error {
}
args := []string{"sparse-checkout", "init"}
- _, err = runCommand(ctx, git.root, git.cmd, args...)
+ _, err = cmdRunner.Run(ctx, git.root, git.cmd, args...)
if err != nil {
return err
}
@ -1165,7 +1104,7 @@ func (git *repoSync) CloneRepo(ctx context.Context) error {
// LocalHashForRev returns the locally known hash for a given rev.
func (git *repoSync) LocalHashForRev(ctx context.Context, rev string) (string, error) {
- output, err := runCommand(ctx, git.root, git.cmd, "rev-parse", rev)
+ output, err := cmdRunner.Run(ctx, git.root, git.cmd, "rev-parse", rev)
if err != nil {
return "", err
}
@ -1174,7 +1113,7 @@ func (git *repoSync) LocalHashForRev(ctx context.Context, rev string) (string, e
// RemoteHashForRef returns the upstream hash for a given ref.
func (git *repoSync) RemoteHashForRef(ctx context.Context, ref string) (string, error) {
- output, err := runCommand(ctx, git.root, git.cmd, "ls-remote", "-q", "origin", ref)
+ output, err := cmdRunner.Run(ctx, git.root, git.cmd, "ls-remote", "-q", "origin", ref)
if err != nil {
return "", err
}
@ -1192,7 +1131,7 @@ func (git *repoSync) RevIsHash(ctx context.Context) (bool, error) {
func (git *repoSync) ResolveRef(ctx context.Context, ref string) (string, error) {
// If git doesn't identify rev as a commit, we're done.
- output, err := runCommand(ctx, git.root, git.cmd, "cat-file", "-t", ref)
+ output, err := cmdRunner.Run(ctx, git.root, git.cmd, "cat-file", "-t", ref)
if err != nil {
return "", err
}
@ -1284,65 +1223,18 @@ func (git *repoSync) GetRevs(ctx context.Context) (string, string, error) {
return local, remote, nil
}
func cmdForLog(command string, args ...string) string {
if strings.ContainsAny(command, " \t\n") {
command = fmt.Sprintf("%q", command)
}
// Don't modify the passed-in args.
argsCopy := make([]string, len(args))
copy(argsCopy, args)
for i := range args {
if strings.ContainsAny(args[i], " \t\n") {
argsCopy[i] = fmt.Sprintf("%q", args[i])
}
}
return command + " " + strings.Join(argsCopy, " ")
}
func runCommand(ctx context.Context, cwd, command string, args ...string) (string, error) {
return runCommandWithStdin(ctx, cwd, "", command, args...)
}
func runCommandWithStdin(ctx context.Context, cwd, stdin, command string, args ...string) (string, error) {
cmdStr := cmdForLog(command, args...)
log.V(5).Info("running command", "cwd", cwd, "cmd", cmdStr)
cmd := exec.CommandContext(ctx, command, args...)
if cwd != "" {
cmd.Dir = cwd
}
outbuf := bytes.NewBuffer(nil)
errbuf := bytes.NewBuffer(nil)
cmd.Stdout = outbuf
cmd.Stderr = errbuf
cmd.Stdin = bytes.NewBufferString(stdin)
err := cmd.Run()
stdout := outbuf.String()
stderr := errbuf.String()
if ctx.Err() == context.DeadlineExceeded {
return "", fmt.Errorf("Run(%s): %w: { stdout: %q, stderr: %q }", cmdStr, ctx.Err(), stdout, stderr)
}
if err != nil {
return "", fmt.Errorf("Run(%s): %w: { stdout: %q, stderr: %q }", cmdStr, err, stdout, stderr)
}
log.V(6).Info("command result", "stdout", stdout, "stderr", stderr)
return stdout, nil
}
// SetupAuth configures the local git repo to use a username and password when
// accessing the repo.
func (git *repoSync) SetupAuth(ctx context.Context, username, password string) error {
log.V(1).Info("setting up git credential store")
- _, err := runCommand(ctx, "", git.cmd, "config", "--global", "credential.helper", "store")
+ _, err := cmdRunner.Run(ctx, "", git.cmd, "config", "--global", "credential.helper", "store")
if err != nil {
return fmt.Errorf("can't configure git credential helper: %w", err)
}
creds := fmt.Sprintf("url=%v\nusername=%v\npassword=%v\n", git.repo, username, password)
- _, err = runCommandWithStdin(ctx, "", creds, git.cmd, "credential", "approve")
+ _, err = cmdRunner.RunWithStdin(ctx, "", creds, git.cmd, "credential", "approve")
if err != nil {
return fmt.Errorf("can't configure git credentials: %w", err)
}
@ -1386,7 +1278,7 @@ func (git *repoSync) SetupCookieFile(ctx context.Context) error {
return fmt.Errorf("can't access git cookiefile: %w", err)
}
- if _, err = runCommand(ctx, "", git.cmd, "config", "--global", "http.cookiefile", pathToCookieFile); err != nil {
+ if _, err = cmdRunner.Run(ctx, "", git.cmd, "config", "--global", "http.cookiefile", pathToCookieFile); err != nil {
return fmt.Errorf("can't configure git cookiefile: %w", err)
}
@ -1462,7 +1354,7 @@ func (git *repoSync) setupExtraGitConfigs(ctx context.Context, configsFlag strin
return fmt.Errorf("can't parse --git-config flag: %v", err)
}
for _, kv := range configs {
- if _, err := runCommand(ctx, "", git.cmd, "config", "--global", kv.key, kv.val); err != nil {
+ if _, err := cmdRunner.Run(ctx, "", git.cmd, "config", "--global", kv.key, kv.val); err != nil {
return fmt.Errorf("error configuring additional git configs %q %q: %v", kv.key, kv.val, err)
}
}
@ -1683,6 +1575,22 @@ OPTIONS
with a period. (default: "", which means error reporting will be
disabled)
+ --exechook-backoff <duration>, $GIT_SYNC_EXECHOOK_BACKOFF
+ The time to wait before retrying a failed --exechook-command.
+ (default: 3s)
+ --exechook-command <string>, $GIT_EXECHOOK_COMMAND
+ An optional command to be executed after syncing a new hash of the
+ remote repository. This command does not take any arguments and
+ executes with the synced repo as its working directory. The
+ execution is subject to the overall --sync-timeout flag and will
+ extend the effective period between sync attempts. This flag
+ obsoletes --sync-hook-command, but if sync-hook-command is
+ specified, it will take precedence.
+ --exechook-timeout <duration>, $GIT_SYNC_EXECHOOK_TIMEOUT
+ The timeout for the --exechook-command. (default: 30s)
--git <string>, $GIT_SYNC_GIT
The git command to run (subject to PATH search, mostly for testing).
(default: git)
@ -1779,13 +1687,6 @@ OPTIONS
The git submodule behavior: one of 'recursive', 'shallow', or 'off'.
(default: recursive)
- --sync-hook-command <string>, $GIT_SYNC_HOOK_COMMAND
- An optional command to be executed after syncing a new hash of the
- remote repository. This command does not take any arguments and
- executes with the synced repo as its working directory. The
- execution is subject to the overall --sync-timeout flag and will
- extend the effective period between sync attempts.
--sync-timeout <duration>, $GIT_SYNC_SYNC_TIMEOUT
The total time allowed for one complete sync. This must be at least
10ms. This flag obsoletes --timeout, but if --timeout is specified,
@ -1803,7 +1704,7 @@ OPTIONS
Print the version and exit.
--webhook-backoff <duration>, $GIT_SYNC_WEBHOOK_BACKOFF
- The time to wait before retrying a failed --webhook-url).
+ The time to wait before retrying a failed --webhook-url.
(default: 3s)
--webhook-method <string>, $GIT_SYNC_WEBHOOK_METHOD

cmd/git-sync/webhook.go (deleted)

@ -1,140 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"fmt"
"net/http"
"sync"
"time"
)
// WebHook structure
type Webhook struct {
// URL for the http/s request
URL string
// Method for the http/s request
Method string
// Code to look for when determining if the request was successful.
// If this is not specified, request is sent and forgotten about.
Success int
// Timeout for the http/s request
Timeout time.Duration
// Backoff for failed webhook calls
Backoff time.Duration
// Holds the data as it crosses from producer to consumer.
Data *webhookData
}
type webhookData struct {
ch chan struct{}
mutex sync.Mutex
hash string
}
func NewWebhookData() *webhookData {
return &webhookData{
ch: make(chan struct{}, 1),
}
}
func (d *webhookData) events() chan struct{} {
return d.ch
}
func (d *webhookData) get() string {
d.mutex.Lock()
defer d.mutex.Unlock()
return d.hash
}
func (d *webhookData) set(newHash string) {
d.mutex.Lock()
defer d.mutex.Unlock()
d.hash = newHash
}
func (d *webhookData) send(newHash string) {
d.set(newHash)
// Non-blocking write. If the channel is full, the consumer will see the
// newest value. If the channel was not full, the consumer will get another
// event.
select {
case d.ch <- struct{}{}:
default:
}
}
func (w *Webhook) Send(hash string) {
w.Data.send(hash)
}
func (w *Webhook) Do(hash string) error {
req, err := http.NewRequest(w.Method, w.URL, nil)
if err != nil {
return err
}
req.Header.Set("Gitsync-Hash", hash)
ctx, cancel := context.WithTimeout(context.Background(), w.Timeout)
defer cancel()
req = req.WithContext(ctx)
log.V(0).Info("sending webhook", "hash", hash, "url", w.URL, "method", w.Method, "timeout", w.Timeout.String())
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
resp.Body.Close()
// If the webhook has a success statusCode, check against it
if w.Success != -1 && resp.StatusCode != w.Success {
return fmt.Errorf("received response code %d expected %d", resp.StatusCode, w.Success)
}
return nil
}
// Wait for trigger events from the channel, and send webhooks when triggered
func (w *Webhook) run() {
var lastHash string
// Wait for trigger from webhookData.Send
for range w.Data.events() {
// Retry in case of error
for {
// Always get the latest value, in case we fail-and-retry and the
// value changed in the meantime. This means that we might not send
// every single hash.
hash := w.Data.get()
if hash == lastHash {
break
}
if err := w.Do(hash); err != nil {
log.Error(err, "webhook failed", "url", w.URL, "method", w.Method, "timeout", w.Timeout.String())
time.Sleep(w.Backoff)
} else {
lastHash = hash
break
}
}
}
}

pkg/cmd/cmd.go (new file, 86 lines)

@ -0,0 +1,86 @@
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"fmt"
"os/exec"
"strings"
"k8s.io/git-sync/pkg/logging"
)
// Runner is an API to run commands and log them in a consistent way.
type Runner struct {
// Logger
logger *logging.Logger
}
// NewRunner returns a new Runner
func NewRunner(logger *logging.Logger) *Runner {
return &Runner{logger: logger}
}
// Run runs the given command
func (r *Runner) Run(ctx context.Context, cwd, command string, args ...string) (string, error) {
return r.RunWithStdin(ctx, cwd, "", command, args...)
}
// RunWithStdin runs the given command with standard input
func (r *Runner) RunWithStdin(ctx context.Context, cwd, stdin, command string, args ...string) (string, error) {
cmdStr := cmdForLog(command, args...)
r.logger.V(5).Info("running command", "cwd", cwd, "cmd", cmdStr)
cmd := exec.CommandContext(ctx, command, args...)
if cwd != "" {
cmd.Dir = cwd
}
outbuf := bytes.NewBuffer(nil)
errbuf := bytes.NewBuffer(nil)
cmd.Stdout = outbuf
cmd.Stderr = errbuf
cmd.Stdin = bytes.NewBufferString(stdin)
err := cmd.Run()
stdout := outbuf.String()
stderr := errbuf.String()
if ctx.Err() == context.DeadlineExceeded {
return "", fmt.Errorf("Run(%s): %w: { stdout: %q, stderr: %q }", cmdStr, ctx.Err(), stdout, stderr)
}
if err != nil {
return "", fmt.Errorf("Run(%s): %w: { stdout: %q, stderr: %q }", cmdStr, err, stdout, stderr)
}
r.logger.V(6).Info("command result", "stdout", stdout, "stderr", stderr)
return stdout, nil
}
func cmdForLog(command string, args ...string) string {
if strings.ContainsAny(command, " \t\n") {
command = fmt.Sprintf("%q", command)
}
argsCopy := make([]string, len(args))
copy(argsCopy, args)
for i := range args {
if strings.ContainsAny(args[i], " \t\n") {
argsCopy[i] = fmt.Sprintf("%q", args[i])
}
}
return command + " " + strings.Join(argsCopy, " ")
}

pkg/hook/exechook.go (new file, 71 lines)

@ -0,0 +1,71 @@
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hook
import (
"context"
"path/filepath"
"time"
"k8s.io/git-sync/pkg/cmd"
"k8s.io/git-sync/pkg/logging"
)
// Exechook structure, implements Hook
type Exechook struct {
// Runner
cmdrunner *cmd.Runner
// Command to run
command string
// Command args
args []string
// Git root path
gitRoot string
// Timeout for the command
timeout time.Duration
// Logger
logger *logging.Logger
}
// NewExechook returns a new Exechook
func NewExechook(cmdrunner *cmd.Runner, command, gitroot string, args []string, timeout time.Duration, logger *logging.Logger) *Exechook {
return &Exechook{
cmdrunner: cmdrunner,
command: command,
gitRoot: gitroot,
args: args,
timeout: timeout,
logger: logger,
}
}
// Name describes hook, implements Hook.Name
func (h *Exechook) Name() string {
return "exechook"
}
// Do runs exechook.command, implements Hook.Do
func (h *Exechook) Do(ctx context.Context, hash string) error {
ctx, cancel := context.WithTimeout(ctx, h.timeout)
defer cancel()
worktreePath := filepath.Join(h.gitRoot, hash)
h.logger.V(0).Info("running exechook", "command", h.command, "timeout", h.timeout)
_, err := h.cmdrunner.Run(ctx, worktreePath, h.command, h.args...)
return err
}

pkg/hook/exechook_test.go (new file, 80 lines)

@ -0,0 +1,80 @@
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hook
import (
"context"
"testing"
"time"
"k8s.io/git-sync/pkg/cmd"
"k8s.io/git-sync/pkg/logging"
)
func TestNotZeroReturnExechookDo(t *testing.T) {
t.Run("test not zero return code", func(t *testing.T) {
l := logging.New("", "")
ch := NewExechook(
cmd.NewRunner(l),
"false",
"/tmp",
[]string{},
time.Second,
l,
)
err := ch.Do(context.Background(), "")
if err == nil {
t.Fatalf("expected error but got none")
}
})
}
func TestZeroReturnExechookDo(t *testing.T) {
t.Run("test zero return code", func(t *testing.T) {
l := logging.New("", "")
ch := NewExechook(
cmd.NewRunner(l),
"true",
"/tmp",
[]string{},
time.Second,
l,
)
err := ch.Do(context.Background(), "")
if err != nil {
t.Fatalf("expected nil but got err")
}
})
}
func TestTimeoutExechookDo(t *testing.T) {
t.Run("test timeout", func(t *testing.T) {
l := logging.New("", "")
ch := NewExechook(
cmd.NewRunner(l),
"/bin/sh",
"/tmp",
[]string{"-c", "sleep 2"},
time.Second,
l,
)
err := ch.Do(context.Background(), "")
if err == nil {
t.Fatalf("expected err but got nil")
}
})
}

pkg/hook/hook.go (new file, 138 lines)

@ -0,0 +1,138 @@
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hook
import (
"context"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/git-sync/pkg/logging"
)
var (
hookRunCount = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "git_sync_hook_run_count_total",
Help: "How many hook runs completed, partitioned by name and state (success, error)",
}, []string{"name", "status"})
)
// Describes what a Hook needs to implement, run by HookRunner
type Hook interface {
// Describes hook
Name() string
// Function that is called by HookRunner
Do(ctx context.Context, hash string) error
}
type hookData struct {
ch chan struct{}
mutex sync.Mutex
hash string
}
// NewHookData returns a new HookData
func NewHookData() *hookData {
return &hookData{
ch: make(chan struct{}, 1),
}
}
func (d *hookData) events() chan struct{} {
return d.ch
}
func (d *hookData) get() string {
d.mutex.Lock()
defer d.mutex.Unlock()
return d.hash
}
func (d *hookData) set(newHash string) {
d.mutex.Lock()
defer d.mutex.Unlock()
d.hash = newHash
}
func (d *hookData) send(newHash string) {
d.set(newHash)
// Non-blocking write. If the channel is full, the consumer will see the
// newest value. If the channel was not full, the consumer will get another
// event.
select {
case d.ch <- struct{}{}:
default:
}
}
// NewHookRunner returns a new HookRunner
func NewHookRunner(hook Hook, backoff time.Duration, data *hookData, log *logging.Logger) *HookRunner {
return &HookRunner{hook: hook, backoff: backoff, data: data, logger: log}
}
// HookRunner struct
type HookRunner struct {
// Hook to run and check
hook Hook
// Backoff for failed hooks
backoff time.Duration
// Holds the data as it crosses from producer to consumer.
data *hookData
// Logger
logger *logging.Logger
}
// Send sends hash to hookdata
func (r *HookRunner) Send(hash string) {
r.data.send(hash)
}
// Run waits for trigger events from the channel, and runs the hook when triggered
func (r *HookRunner) Run(ctx context.Context) {
var lastHash string
prometheus.MustRegister(hookRunCount)
// Wait for trigger from hookData.Send
for range r.data.events() {
// Retry in case of error
for {
// Always get the latest value, in case we fail-and-retry and the
// value changed in the meantime. This means that we might not send
// every single hash.
hash := r.data.get()
if hash == lastHash {
break
}
if err := r.hook.Do(ctx, hash); err != nil {
r.logger.Error(err, "hook failed")
updateHookRunCountMetric(r.hook.Name(), "error")
time.Sleep(r.backoff)
} else {
updateHookRunCountMetric(r.hook.Name(), "success")
lastHash = hash
break
}
}
}
}
func updateHookRunCountMetric(name, status string) {
hookRunCount.WithLabelValues(name, status).Inc()
}
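
To show how the new package fits together (this example is not part of the commit; the printHook type and the hash value are made up for illustration), a minimal Hook implementation can be wired into a HookRunner the same way main.go wires the webhook and exechook:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/git-sync/pkg/hook"
	"k8s.io/git-sync/pkg/logging"
)

// printHook implements hook.Hook by just printing the synced hash.
type printHook struct{}

func (printHook) Name() string { return "printhook" }

func (printHook) Do(ctx context.Context, hash string) error {
	fmt.Println("synced", hash)
	return nil
}

func main() {
	log := logging.New("", "")
	// Retry failed hook runs every 3 seconds, like the default --exechook-backoff.
	runner := hook.NewHookRunner(printHook{}, 3*time.Second, hook.NewHookData(), log)
	go runner.Run(context.Background())

	// Producer side: hand the runner the latest synced hash.
	runner.Send("1111111111111111111111111111111111111111")
	time.Sleep(time.Second) // give the goroutine a moment to run the hook
}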

pkg/hook/hook_test.go

@ -14,12 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
- package main
+ package hook
import (
"fmt"
"testing"
- "time"
)
const (
@ -27,72 +26,55 @@ const (
hash2 = "2222222222222222222222222222222222222222"
)
- func TestWebhookData(t *testing.T) {
+ func TestHookData(t *testing.T) {
- t.Run("webhook consumes first hash value", func(t *testing.T) {
+ t.Run("hook consumes first hash value", func(t *testing.T) {
- whd := NewWebhookData()
+ hd := NewHookData()
- whd.send(hash1)
+ hd.send(hash1)
- <-whd.events()
+ <-hd.events()
- hash := whd.get()
+ hash := hd.get()
if hash1 != hash {
t.Fatalf("expected hash %s but got %s", hash1, hash)
}
})
t.Run("last update wins when channel buffer is full", func(t *testing.T) {
- whd := NewWebhookData()
+ hd := NewHookData()
for i := 0; i < 10; i++ {
h := fmt.Sprintf("111111111111111111111111111111111111111%d", i)
- whd.send(h)
+ hd.send(h)
}
- whd.send(hash2)
+ hd.send(hash2)
- <-whd.events()
+ <-hd.events()
- hash := whd.get()
+ hash := hd.get()
if hash2 != hash {
t.Fatalf("expected hash %s but got %s", hash2, hash)
}
})
t.Run("same hash value", func(t *testing.T) {
- whd := NewWebhookData()
+ hd := NewHookData()
- events := whd.events()
+ events := hd.events()
- whd.send(hash1)
+ hd.send(hash1)
<-events
- hash := whd.get()
+ hash := hd.get()
if hash1 != hash {
t.Fatalf("expected hash %s but got %s", hash1, hash)
}
- whd.send(hash1)
+ hd.send(hash1)
<-events
- hash = whd.get()
+ hash = hd.get()
if hash1 != hash {
t.Fatalf("expected hash %s but got %s", hash1, hash)
}
})
}
func TestDo(t *testing.T) {
t.Run("test invalid urls are handled", func(t *testing.T) {
wh := Webhook{
URL: ":http://localhost:601426/hooks/webhook",
Method: "POST",
Success: 200,
Timeout: time.Second,
Backoff: time.Second * 3,
Data: NewWebhookData(),
}
err := wh.Do("hash")
if err == nil {
t.Fatalf("expected error for invalid url but got none")
}
})
}

pkg/hook/webhook.go (new file, 84 lines)

@ -0,0 +1,84 @@
/*
Copyright 2019 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hook
import (
"context"
"fmt"
"net/http"
"time"
"k8s.io/git-sync/pkg/logging"
)
// WebHook structure, implements Hook
type Webhook struct {
// Url for the http/s request
url string
// Method for the http/s request
method string
// Code to look for when determining if the request was successful.
// If this is not specified, request is sent and forgotten about.
success int
// Timeout for the http/s request
timeout time.Duration
// Logger
logger *logging.Logger
}
// NewWebhook returns a new WebHook
func NewWebhook(url, method string, success int, timeout time.Duration, logger *logging.Logger) *Webhook {
return &Webhook{
url: url,
method: method,
success: success,
timeout: timeout,
logger: logger,
}
}
// Name describes hook, implements Hook.Name
func (w *Webhook) Name() string {
return "webhook"
}
// Do calls webhook.url, implements Hook.Do
func (w *Webhook) Do(ctx context.Context, hash string) error {
req, err := http.NewRequest(w.method, w.url, nil)
if err != nil {
return err
}
req.Header.Set("Gitsync-Hash", hash)
ctx, cancel := context.WithTimeout(ctx, w.timeout)
defer cancel()
req = req.WithContext(ctx)
w.logger.V(0).Info("sending webhook", "hash", hash, "url", w.url, "method", w.method, "timeout", w.timeout)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
resp.Body.Close()
// If the webhook has a success statusCode, check against it
if w.success != -1 && resp.StatusCode != w.success {
return fmt.Errorf("received response code %d expected %d", resp.StatusCode, w.success)
}
return nil
}

pkg/hook/webhook_test.go (new file, 41 lines)

@ -0,0 +1,41 @@
/*
Copyright 2019 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hook
import (
"context"
"testing"
"time"
"k8s.io/git-sync/pkg/logging"
)
func TestWebhookDo(t *testing.T) {
t.Run("test invalid urls are handled", func(t *testing.T) {
wh := NewWebhook(
":http://localhost:601426/hooks/webhook",
"POST",
200,
time.Second,
logging.New("", ""),
)
err := wh.Do(context.Background(), "hash")
if err == nil {
t.Fatalf("expected error for invalid url but got none")
}
})
}

pkg/logging/logging.go (new file, 152 lines)

@ -0,0 +1,152 @@
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logging
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/go-logr/glogr"
"github.com/go-logr/logr"
)
// Logger provides a logging interface.
type Logger struct {
log logr.Logger
root string
errorFile string
}
// New returns a Logger, with the same API as logr.Logger.
func New(root string, errorFile string) *Logger {
return &Logger{log: glogr.New(), root: root, errorFile: errorFile}
}
// Info implements logr.Logger.Info.
func (l *Logger) Info(msg string, keysAndValues ...interface{}) {
l.log.Info(msg, keysAndValues...)
}
// Error implements logr.Logger.Error.
func (l *Logger) Error(err error, msg string, kvList ...interface{}) {
l.log.Error(err, msg, kvList...)
if l.errorFile == "" {
return
}
payload := struct {
Msg string
Err string
Args map[string]interface{}
}{
Msg: msg,
Err: err.Error(),
Args: map[string]interface{}{},
}
if len(kvList)%2 != 0 {
kvList = append(kvList, "<no-value>")
}
for i := 0; i < len(kvList); i += 2 {
k, ok := kvList[i].(string)
if !ok {
k = fmt.Sprintf("%v", kvList[i])
}
payload.Args[k] = kvList[i+1]
}
jb, err := json.Marshal(payload)
if err != nil {
l.log.Error(err, "can't encode error payload")
content := fmt.Sprintf("%v", err)
l.writeContent([]byte(content))
} else {
l.writeContent(jb)
}
}
// V implements logr.Logger.V.
func (l *Logger) V(level int) logr.Logger {
return l.log.V(level)
}
// WithValues implements logr.Logger.WithValues.
func (l *Logger) WithValues(keysAndValues ...interface{}) logr.Logger {
return l.log.WithValues(keysAndValues...)
}
// WithName implements logr.Logger.WithName.
func (l *Logger) WithName(name string) logr.Logger {
return l.log.WithName(name)
}
// ExportError exports the error to the error file if --export-error is enabled.
func (l *Logger) ExportError(content string) {
if l.errorFile == "" {
return
}
l.writeContent([]byte(content))
}
// DeleteErrorFile deletes the error file.
func (l *Logger) DeleteErrorFile() {
if l.errorFile == "" {
return
}
errorFile := filepath.Join(l.root, l.errorFile)
if err := os.Remove(errorFile); err != nil {
if os.IsNotExist(err) {
return
}
l.log.Error(err, "can't delete the error-file", "filename", errorFile)
}
}
// writeContent writes the error content to the error file.
func (l *Logger) writeContent(content []byte) {
if _, err := os.Stat(l.root); os.IsNotExist(err) {
fileMode := os.FileMode(0755)
if err := os.Mkdir(l.root, fileMode); err != nil {
l.log.Error(err, "can't create the root directory", "root", l.root)
return
}
}
tmpFile, err := ioutil.TempFile(l.root, "tmp-err-")
if err != nil {
l.log.Error(err, "can't create temporary error-file", "directory", l.root, "prefix", "tmp-err-")
return
}
defer func() {
if err := tmpFile.Close(); err != nil {
l.log.Error(err, "can't close temporary error-file", "filename", tmpFile.Name())
}
}()
if _, err = tmpFile.Write(content); err != nil {
l.log.Error(err, "can't write to temporary error-file", "filename", tmpFile.Name())
return
}
errorFile := filepath.Join(l.root, l.errorFile)
if err := os.Rename(tmpFile.Name(), errorFile); err != nil {
l.log.Error(err, "can't rename to error-file", "temp-file", tmpFile.Name(), "error-file", errorFile)
return
}
if err := os.Chmod(errorFile, 0644); err != nil {
l.log.Error(err, "can't change permissions on the error-file", "error-file", errorFile)
}
}

test_e2e.sh

@ -164,7 +164,11 @@ trap finish INT EXIT
SLOW_GIT_CLONE=/slow_git_clone.sh
SLOW_GIT_FETCH=/slow_git_fetch.sh
ASKPASS_GIT=/askpass_git.sh
- SYNC_HOOK_COMMAND=/test_sync_hook_command.sh
+ EXECHOOK_COMMAND=/test_exechook_command.sh
+ EXECHOOK_COMMAND_FAIL=/test_exechook_command_fail.sh
+ RUNLOG="$DIR/runlog.exechook-fail-retry"
+ rm -f $RUNLOG
+ touch $RUNLOG
function GIT_SYNC() {
#./bin/linux_amd64/git-sync "$@"
@ -178,7 +182,9 @@ function GIT_SYNC() {
-v "$(pwd)/slow_git_clone.sh":"$SLOW_GIT_CLONE":ro \
-v "$(pwd)/slow_git_fetch.sh":"$SLOW_GIT_FETCH":ro \
-v "$(pwd)/askpass_git.sh":"$ASKPASS_GIT":ro \
- -v "$(pwd)/test_sync_hook_command.sh":"$SYNC_HOOK_COMMAND":ro \
+ -v "$(pwd)/test_exechook_command.sh":"$EXECHOOK_COMMAND":ro \
+ -v "$(pwd)/test_exechook_command_fail.sh":"$EXECHOOK_COMMAND_FAIL":ro \
+ -v "$RUNLOG":/var/log/runs \
-v "$DOT_SSH/id_test":"/etc/git-secret/ssh":ro \
--env XDG_CONFIG_HOME=$DIR \
e2e/git-sync:$(make -s version)__$(go env GOOS)_$(go env GOARCH) \
@ -949,9 +955,9 @@ assert_file_eq "$ROOT"/link/file "$TESTCASE 1"
pass
##############################################
- # Test sync_hook_command
+ # Test exechook-success
##############################################
- testcase "sync_hook_command"
+ testcase "exechook-success"
# First sync
echo "$TESTCASE 1" > "$REPO"/file
git -C "$REPO" commit -qam "$TESTCASE 1"
@ -961,30 +967,55 @@ GIT_SYNC \
--branch=e2e-branch \
--root="$ROOT" \
--link="link" \
- --sync-hook-command="$SYNC_HOOK_COMMAND" \
+ --exechook-command="$EXECHOOK_COMMAND" \
> "$DIR"/log."$TESTCASE" 2>&1 &
sleep 3
assert_link_exists "$ROOT"/link
assert_file_exists "$ROOT"/link/file
- assert_file_exists "$ROOT"/link/sync-hook
+ assert_file_exists "$ROOT"/link/exechook
- assert_file_exists "$ROOT"/link/link-sync-hook
+ assert_file_exists "$ROOT"/link/link-exechook
assert_file_eq "$ROOT"/link/file "$TESTCASE 1"
- assert_file_eq "$ROOT"/link/sync-hook "$TESTCASE 1"
+ assert_file_eq "$ROOT"/link/exechook "$TESTCASE 1"
- assert_file_eq "$ROOT"/link/link-sync-hook "$TESTCASE 1"
+ assert_file_eq "$ROOT"/link/link-exechook "$TESTCASE 1"
# Move forward
echo "$TESTCASE 2" > "$REPO"/file
git -C "$REPO" commit -qam "$TESTCASE 2"
sleep 3
assert_link_exists "$ROOT"/link
assert_file_exists "$ROOT"/link/file
- assert_file_exists "$ROOT"/link/sync-hook
+ assert_file_exists "$ROOT"/link/exechook
- assert_file_exists "$ROOT"/link/link-sync-hook
+ assert_file_exists "$ROOT"/link/link-exechook
assert_file_eq "$ROOT"/link/file "$TESTCASE 2"
- assert_file_eq "$ROOT"/link/sync-hook "$TESTCASE 2"
+ assert_file_eq "$ROOT"/link/exechook "$TESTCASE 2"
- assert_file_eq "$ROOT"/link/link-sync-hook "$TESTCASE 2"
+ assert_file_eq "$ROOT"/link/link-exechook "$TESTCASE 2"
# Wrap up
pass
+ ##############################################
+ # Test exechook-fail-retry
+ ##############################################
+ testcase "exechook-fail-retry"
+ cat /dev/null > "$RUNLOG"
+ # First sync - return a failure to ensure that we try again
+ echo "$TESTCASE 1" > "$REPO"/file
+ git -C "$REPO" commit -qam "$TESTCASE 1"
+ GIT_SYNC \
+ --period=100ms \
+ --repo="file://$REPO" \
+ --branch=e2e-branch \
+ --root="$ROOT" \
+ --link="link" \
+ --exechook-command="$EXECHOOK_COMMAND_FAIL" \
+ --exechook-backoff=1s \
+ > "$DIR"/log."$TESTCASE" 2>&1 &
+ # Check that exechook was called
+ sleep 5
+ RUNS=$(cat "$RUNLOG" | wc -l)
+ if [ "$RUNS" -lt 2 ]; then
+ fail "exechook called $RUNS times, it should be at least 2"
+ fi
+ pass
##############################################
# Test webhook success
##############################################

test_sync_hook_command.sh → test_exechook_command.sh (renamed)

@ -14,8 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
- # Use for e2e test of --sync-hook-command.
+ # Use for e2e test of --exechook-command.
# This option takes no command arguments, so requires a wrapper script.
- cat file > sync-hook
+ cat file > exechook
- cat ../link/file > link-sync-hook
+ cat ../link/file > link-exechook

test_exechook_command_fail.sh (new executable file, 21 lines)

@ -0,0 +1,21 @@
#!/bin/sh
#
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Use for e2e test of --exechook-command.
# This option takes no command arguments, so requires a wrapper script.
date >> /var/log/runs
exit 1