mirror of https://github.com/kubernetes/kops.git
Vendor goimports by adding it to tools.go
This commit is contained in:
parent
274de31154
commit
47f9689910
2
go.mod
2
go.mod
|
@ -120,7 +120,7 @@ require (
|
|||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529
|
||||
golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a
|
||||
golang.org/x/tools v0.0.0-20191124021906-f5828fc9a103 // indirect
|
||||
golang.org/x/tools v0.0.0-20191124021906-f5828fc9a103
|
||||
google.golang.org/api v0.0.0-20181220000619-583d854617af
|
||||
gopkg.in/gcfg.v1 v1.2.0
|
||||
gopkg.in/inf.v0 v0.9.1
|
||||
|
|
2
tools.go
2
tools.go
|
@ -26,6 +26,8 @@ import (
|
|||
|
||||
_ "github.com/client9/misspell/cmd/misspell"
|
||||
|
||||
_ "golang.org/x/tools/cmd/goimports"
|
||||
|
||||
_ "honnef.co/go/tools/cmd/staticcheck"
|
||||
|
||||
_ "sigs.k8s.io/controller-tools/cmd/controller-gen"
|
||||
|
|
|
@ -0,0 +1,20 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"goimports.go",
|
||||
"goimports_gc.go",
|
||||
],
|
||||
importmap = "k8s.io/kops/vendor/golang.org/x/tools/cmd/goimports",
|
||||
importpath = "golang.org/x/tools/cmd/goimports",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["//vendor/golang.org/x/tools/internal/imports:go_default_library"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "goimports",
|
||||
embed = [":go_default_library"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
|
@ -0,0 +1,43 @@
|
|||
/*
|
||||
|
||||
Command goimports updates your Go import lines,
|
||||
adding missing ones and removing unreferenced ones.
|
||||
|
||||
$ go get golang.org/x/tools/cmd/goimports
|
||||
|
||||
In addition to fixing imports, goimports also formats
|
||||
your code in the same style as gofmt so it can be used
|
||||
as a replacement for your editor's gofmt-on-save hook.
|
||||
|
||||
For emacs, make sure you have the latest go-mode.el:
|
||||
https://github.com/dominikh/go-mode.el
|
||||
Then in your .emacs file:
|
||||
(setq gofmt-command "goimports")
|
||||
(add-hook 'before-save-hook 'gofmt-before-save)
|
||||
|
||||
For vim, set "gofmt_command" to "goimports":
|
||||
https://golang.org/change/39c724dd7f252
|
||||
https://golang.org/wiki/IDEsAndTextEditorPlugins
|
||||
etc
|
||||
|
||||
For GoSublime, follow the steps described here:
|
||||
http://michaelwhatcott.com/gosublime-goimports/
|
||||
|
||||
For other editors, you probably know what to do.
|
||||
|
||||
To exclude directories in your $GOPATH from being scanned for Go
|
||||
files, goimports respects a configuration file at
|
||||
$GOPATH/src/.goimportsignore which may contain blank lines, comment
|
||||
lines (beginning with '#'), or lines naming a directory relative to
|
||||
the configuration file to ignore when scanning. No globbing or regex
|
||||
patterns are allowed. Use the "-v" verbose flag to verify it's
|
||||
working and see what goimports is doing.
|
||||
|
||||
File bugs or feature requests at:
|
||||
|
||||
https://golang.org/issues/new?title=x/tools/cmd/goimports:+
|
||||
|
||||
Happy hacking!
|
||||
|
||||
*/
|
||||
package main // import "golang.org/x/tools/cmd/goimports"
|
|
@ -0,0 +1,377 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"go/scanner"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/internal/imports"
|
||||
)
|
||||
|
||||
var (
|
||||
// main operation modes
|
||||
list = flag.Bool("l", false, "list files whose formatting differs from goimport's")
|
||||
write = flag.Bool("w", false, "write result to (source) file instead of stdout")
|
||||
doDiff = flag.Bool("d", false, "display diffs instead of rewriting files")
|
||||
srcdir = flag.String("srcdir", "", "choose imports as if source code is from `dir`. When operating on a single file, dir may instead be the complete file name.")
|
||||
|
||||
verbose bool // verbose logging
|
||||
|
||||
cpuProfile = flag.String("cpuprofile", "", "CPU profile output")
|
||||
memProfile = flag.String("memprofile", "", "memory profile output")
|
||||
memProfileRate = flag.Int("memrate", 0, "if > 0, sets runtime.MemProfileRate")
|
||||
|
||||
options = &imports.Options{
|
||||
TabWidth: 8,
|
||||
TabIndent: true,
|
||||
Comments: true,
|
||||
Fragment: true,
|
||||
// This environment, and its caches, will be reused for the whole run.
|
||||
Env: &imports.ProcessEnv{
|
||||
GOPATH: build.Default.GOPATH,
|
||||
GOROOT: build.Default.GOROOT,
|
||||
},
|
||||
}
|
||||
exitCode = 0
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.BoolVar(&options.AllErrors, "e", false, "report all errors (not just the first 10 on different lines)")
|
||||
flag.StringVar(&options.Env.LocalPrefix, "local", "", "put imports beginning with this string after 3rd-party packages; comma-separated list")
|
||||
flag.BoolVar(&options.FormatOnly, "format-only", false, "if true, don't fix imports and only format. In this mode, goimports is effectively gofmt, with the addition that imports are grouped into sections.")
|
||||
}
|
||||
|
||||
func report(err error) {
|
||||
scanner.PrintError(os.Stderr, err)
|
||||
exitCode = 2
|
||||
}
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "usage: goimports [flags] [path ...]\n")
|
||||
flag.PrintDefaults()
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
func isGoFile(f os.FileInfo) bool {
|
||||
// ignore non-Go files
|
||||
name := f.Name()
|
||||
return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go")
|
||||
}
|
||||
|
||||
// argumentType is which mode goimports was invoked as.
|
||||
type argumentType int
|
||||
|
||||
const (
|
||||
// fromStdin means the user is piping their source into goimports.
|
||||
fromStdin argumentType = iota
|
||||
|
||||
// singleArg is the common case from editors, when goimports is run on
|
||||
// a single file.
|
||||
singleArg
|
||||
|
||||
// multipleArg is when the user ran "goimports file1.go file2.go"
|
||||
// or ran goimports on a directory tree.
|
||||
multipleArg
|
||||
)
|
||||
|
||||
func processFile(filename string, in io.Reader, out io.Writer, argType argumentType) error {
|
||||
opt := options
|
||||
if argType == fromStdin {
|
||||
nopt := *options
|
||||
nopt.Fragment = true
|
||||
opt = &nopt
|
||||
}
|
||||
|
||||
if in == nil {
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
in = f
|
||||
}
|
||||
|
||||
src, err := ioutil.ReadAll(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
target := filename
|
||||
if *srcdir != "" {
|
||||
// Determine whether the provided -srcdirc is a directory or file
|
||||
// and then use it to override the target.
|
||||
//
|
||||
// See https://github.com/dominikh/go-mode.el/issues/146
|
||||
if isFile(*srcdir) {
|
||||
if argType == multipleArg {
|
||||
return errors.New("-srcdir value can't be a file when passing multiple arguments or when walking directories")
|
||||
}
|
||||
target = *srcdir
|
||||
} else if argType == singleArg && strings.HasSuffix(*srcdir, ".go") && !isDir(*srcdir) {
|
||||
// For a file which doesn't exist on disk yet, but might shortly.
|
||||
// e.g. user in editor opens $DIR/newfile.go and newfile.go doesn't yet exist on disk.
|
||||
// The goimports on-save hook writes the buffer to a temp file
|
||||
// first and runs goimports before the actual save to newfile.go.
|
||||
// The editor's buffer is named "newfile.go" so that is passed to goimports as:
|
||||
// goimports -srcdir=/gopath/src/pkg/newfile.go /tmp/gofmtXXXXXXXX.go
|
||||
// and then the editor reloads the result from the tmp file and writes
|
||||
// it to newfile.go.
|
||||
target = *srcdir
|
||||
} else {
|
||||
// Pretend that file is from *srcdir in order to decide
|
||||
// visible imports correctly.
|
||||
target = filepath.Join(*srcdir, filepath.Base(filename))
|
||||
}
|
||||
}
|
||||
|
||||
res, err := imports.Process(target, src, opt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !bytes.Equal(src, res) {
|
||||
// formatting has changed
|
||||
if *list {
|
||||
fmt.Fprintln(out, filename)
|
||||
}
|
||||
if *write {
|
||||
if argType == fromStdin {
|
||||
// filename is "<standard input>"
|
||||
return errors.New("can't use -w on stdin")
|
||||
}
|
||||
err = ioutil.WriteFile(filename, res, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if *doDiff {
|
||||
if argType == fromStdin {
|
||||
filename = "stdin.go" // because <standard input>.orig looks silly
|
||||
}
|
||||
data, err := diff(src, res, filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("computing diff: %s", err)
|
||||
}
|
||||
fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename))
|
||||
out.Write(data)
|
||||
}
|
||||
}
|
||||
|
||||
if !*list && !*write && !*doDiff {
|
||||
_, err = out.Write(res)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func visitFile(path string, f os.FileInfo, err error) error {
|
||||
if err == nil && isGoFile(f) {
|
||||
err = processFile(path, nil, os.Stdout, multipleArg)
|
||||
}
|
||||
if err != nil {
|
||||
report(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func walkDir(path string) {
|
||||
filepath.Walk(path, visitFile)
|
||||
}
|
||||
|
||||
func main() {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
// call gofmtMain in a separate function
|
||||
// so that it can use defer and have them
|
||||
// run before the exit.
|
||||
gofmtMain()
|
||||
os.Exit(exitCode)
|
||||
}
|
||||
|
||||
// parseFlags parses command line flags and returns the paths to process.
|
||||
// It's a var so that custom implementations can replace it in other files.
|
||||
var parseFlags = func() []string {
|
||||
flag.BoolVar(&verbose, "v", false, "verbose logging")
|
||||
|
||||
flag.Parse()
|
||||
return flag.Args()
|
||||
}
|
||||
|
||||
func bufferedFileWriter(dest string) (w io.Writer, close func()) {
|
||||
f, err := os.Create(dest)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
bw := bufio.NewWriter(f)
|
||||
return bw, func() {
|
||||
if err := bw.Flush(); err != nil {
|
||||
log.Fatalf("error flushing %v: %v", dest, err)
|
||||
}
|
||||
if err := f.Close(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func gofmtMain() {
|
||||
flag.Usage = usage
|
||||
paths := parseFlags()
|
||||
|
||||
if *cpuProfile != "" {
|
||||
bw, flush := bufferedFileWriter(*cpuProfile)
|
||||
pprof.StartCPUProfile(bw)
|
||||
defer flush()
|
||||
defer pprof.StopCPUProfile()
|
||||
}
|
||||
// doTrace is a conditionally compiled wrapper around runtime/trace. It is
|
||||
// used to allow goimports to compile under gccgo, which does not support
|
||||
// runtime/trace. See https://golang.org/issue/15544.
|
||||
defer doTrace()()
|
||||
if *memProfileRate > 0 {
|
||||
runtime.MemProfileRate = *memProfileRate
|
||||
bw, flush := bufferedFileWriter(*memProfile)
|
||||
defer func() {
|
||||
runtime.GC() // materialize all statistics
|
||||
if err := pprof.WriteHeapProfile(bw); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
flush()
|
||||
}()
|
||||
}
|
||||
|
||||
if verbose {
|
||||
log.SetFlags(log.LstdFlags | log.Lmicroseconds)
|
||||
options.Env.Debug = true
|
||||
}
|
||||
if options.TabWidth < 0 {
|
||||
fmt.Fprintf(os.Stderr, "negative tabwidth %d\n", options.TabWidth)
|
||||
exitCode = 2
|
||||
return
|
||||
}
|
||||
|
||||
if len(paths) == 0 {
|
||||
if err := processFile("<standard input>", os.Stdin, os.Stdout, fromStdin); err != nil {
|
||||
report(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
argType := singleArg
|
||||
if len(paths) > 1 {
|
||||
argType = multipleArg
|
||||
}
|
||||
|
||||
for _, path := range paths {
|
||||
switch dir, err := os.Stat(path); {
|
||||
case err != nil:
|
||||
report(err)
|
||||
case dir.IsDir():
|
||||
walkDir(path)
|
||||
default:
|
||||
if err := processFile(path, nil, os.Stdout, argType); err != nil {
|
||||
report(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func writeTempFile(dir, prefix string, data []byte) (string, error) {
|
||||
file, err := ioutil.TempFile(dir, prefix)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
_, err = file.Write(data)
|
||||
if err1 := file.Close(); err == nil {
|
||||
err = err1
|
||||
}
|
||||
if err != nil {
|
||||
os.Remove(file.Name())
|
||||
return "", err
|
||||
}
|
||||
return file.Name(), nil
|
||||
}
|
||||
|
||||
func diff(b1, b2 []byte, filename string) (data []byte, err error) {
|
||||
f1, err := writeTempFile("", "gofmt", b1)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer os.Remove(f1)
|
||||
|
||||
f2, err := writeTempFile("", "gofmt", b2)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer os.Remove(f2)
|
||||
|
||||
cmd := "diff"
|
||||
if runtime.GOOS == "plan9" {
|
||||
cmd = "/bin/ape/diff"
|
||||
}
|
||||
|
||||
data, err = exec.Command(cmd, "-u", f1, f2).CombinedOutput()
|
||||
if len(data) > 0 {
|
||||
// diff exits with a non-zero status when the files don't match.
|
||||
// Ignore that failure as long as we get output.
|
||||
return replaceTempFilename(data, filename)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// replaceTempFilename replaces temporary filenames in diff with actual one.
|
||||
//
|
||||
// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500
|
||||
// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500
|
||||
// ...
|
||||
// ->
|
||||
// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500
|
||||
// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500
|
||||
// ...
|
||||
func replaceTempFilename(diff []byte, filename string) ([]byte, error) {
|
||||
bs := bytes.SplitN(diff, []byte{'\n'}, 3)
|
||||
if len(bs) < 3 {
|
||||
return nil, fmt.Errorf("got unexpected diff for %s", filename)
|
||||
}
|
||||
// Preserve timestamps.
|
||||
var t0, t1 []byte
|
||||
if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 {
|
||||
t0 = bs[0][i:]
|
||||
}
|
||||
if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 {
|
||||
t1 = bs[1][i:]
|
||||
}
|
||||
// Always print filepath with slash separator.
|
||||
f := filepath.ToSlash(filename)
|
||||
bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0))
|
||||
bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1))
|
||||
return bytes.Join(bs, []byte{'\n'}), nil
|
||||
}
|
||||
|
||||
// isFile reports whether name is a file.
|
||||
func isFile(name string) bool {
|
||||
fi, err := os.Stat(name)
|
||||
return err == nil && fi.Mode().IsRegular()
|
||||
}
|
||||
|
||||
// isDir reports whether name is a directory.
|
||||
func isDir(name string) bool {
|
||||
fi, err := os.Stat(name)
|
||||
return err == nil && fi.IsDir()
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build gc
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"runtime/trace"
|
||||
)
|
||||
|
||||
var traceProfile = flag.String("trace", "", "trace profile output")
|
||||
|
||||
func doTrace() func() {
|
||||
if *traceProfile != "" {
|
||||
bw, flush := bufferedFileWriter(*traceProfile)
|
||||
trace.Start(bw)
|
||||
return func() {
|
||||
flush()
|
||||
trace.Stop()
|
||||
}
|
||||
}
|
||||
return func() {}
|
||||
}
|
|
@ -0,0 +1,11 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gc
|
||||
|
||||
package main
|
||||
|
||||
func doTrace() func() {
|
||||
return func() {}
|
||||
}
|
|
@ -128,11 +128,13 @@ type Pass struct {
|
|||
// See comments for ExportObjectFact.
|
||||
ExportPackageFact func(fact Fact)
|
||||
|
||||
// AllPackageFacts returns a new slice containing all package facts in unspecified order.
|
||||
// AllPackageFacts returns a new slice containing all package facts of the analysis's FactTypes
|
||||
// in unspecified order.
|
||||
// WARNING: This is an experimental API and may change in the future.
|
||||
AllPackageFacts func() []PackageFact
|
||||
|
||||
// AllObjectFacts returns a new slice containing all object facts in unspecified order.
|
||||
// AllObjectFacts returns a new slice containing all object facts of the analysis's FactTypes
|
||||
// in unspecified order.
|
||||
// WARNING: This is an experimental API and may change in the future.
|
||||
AllObjectFacts func() []ObjectFact
|
||||
|
||||
|
@ -161,13 +163,19 @@ func (pass *Pass) Reportf(pos token.Pos, format string, args ...interface{}) {
|
|||
pass.Report(Diagnostic{Pos: pos, Message: msg})
|
||||
}
|
||||
|
||||
// reportNodef is a helper function that reports a Diagnostic using the
|
||||
// range denoted by the AST node.
|
||||
//
|
||||
// WARNING: This is an experimental API and may change in the future.
|
||||
func (pass *Pass) reportNodef(node ast.Node, format string, args ...interface{}) {
|
||||
// The Range interface provides a range. It's equivalent to and satisfied by
|
||||
// ast.Node.
|
||||
type Range interface {
|
||||
Pos() token.Pos // position of first character belonging to the node
|
||||
End() token.Pos // position of first character immediately after the node
|
||||
}
|
||||
|
||||
// ReportRangef is a helper function that reports a Diagnostic using the
|
||||
// range provided. ast.Node values can be passed in as the range because
|
||||
// they satisfy the Range interface.
|
||||
func (pass *Pass) ReportRangef(rng Range, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
pass.Report(Diagnostic{Pos: node.Pos(), End: node.End(), Message: msg})
|
||||
pass.Report(Diagnostic{Pos: rng.Pos(), End: rng.End(), Message: msg})
|
||||
}
|
||||
|
||||
func (pass *Pass) String() string {
|
||||
|
|
|
@ -1,5 +1,3 @@
|
|||
// +build !experimental
|
||||
|
||||
package analysis
|
||||
|
||||
import "go/token"
|
||||
|
@ -17,4 +15,47 @@ type Diagnostic struct {
|
|||
End token.Pos // optional
|
||||
Category string // optional
|
||||
Message string
|
||||
|
||||
// SuggestedFixes contains suggested fixes for a diagnostic which can be used to perform
|
||||
// edits to a file that address the diagnostic.
|
||||
// TODO(matloob): Should multiple SuggestedFixes be allowed for a diagnostic?
|
||||
// Diagnostics should not contain SuggestedFixes that overlap.
|
||||
// Experimental: This API is experimental and may change in the future.
|
||||
SuggestedFixes []SuggestedFix // optional
|
||||
|
||||
// Experimental: This API is experimental and may change in the future.
|
||||
Related []RelatedInformation // optional
|
||||
}
|
||||
|
||||
// RelatedInformation contains information related to a diagnostic.
|
||||
// For example, a diagnostic that flags duplicated declarations of a
|
||||
// variable may include one RelatedInformation per existing
|
||||
// declaration.
|
||||
type RelatedInformation struct {
|
||||
Pos token.Pos
|
||||
End token.Pos
|
||||
Message string
|
||||
}
|
||||
|
||||
// A SuggestedFix is a code change associated with a Diagnostic that a user can choose
|
||||
// to apply to their code. Usually the SuggestedFix is meant to fix the issue flagged
|
||||
// by the diagnostic.
|
||||
// TextEdits for a SuggestedFix should not overlap. TextEdits for a SuggestedFix
|
||||
// should not contain edits for other packages.
|
||||
// Experimental: This API is experimental and may change in the future.
|
||||
type SuggestedFix struct {
|
||||
// A description for this suggested fix to be shown to a user deciding
|
||||
// whether to accept it.
|
||||
Message string
|
||||
TextEdits []TextEdit
|
||||
}
|
||||
|
||||
// A TextEdit represents the replacement of the code between Pos and End with the new text.
|
||||
// Each TextEdit should apply to a single file. End should not be earlier in the file than Pos.
|
||||
// Experimental: This API is experimental and may change in the future.
|
||||
type TextEdit struct {
|
||||
// For a pure insertion, End can either be set to Pos or token.NoPos.
|
||||
Pos token.Pos
|
||||
End token.Pos
|
||||
NewText []byte
|
||||
}
|
||||
|
|
|
@ -1,41 +0,0 @@
|
|||
// +build experimental
|
||||
|
||||
package analysis
|
||||
|
||||
import "go/token"
|
||||
|
||||
// A Diagnostic is a message associated with a source location or range.
|
||||
//
|
||||
// An Analyzer may return a variety of diagnostics; the optional Category,
|
||||
// which should be a constant, may be used to classify them.
|
||||
// It is primarily intended to make it easy to look up documentation.
|
||||
//
|
||||
// If End is provided, the diagnostic is specified to apply to the range between
|
||||
// Pos and End.
|
||||
type Diagnostic struct {
|
||||
Pos token.Pos
|
||||
End token.Pos // optional
|
||||
Category string // optional
|
||||
Message string
|
||||
|
||||
// TODO(matloob): Should multiple SuggestedFixes be allowed for a diagnostic?
|
||||
SuggestedFixes []SuggestedFix // optional
|
||||
}
|
||||
|
||||
// A SuggestedFix is a code change associated with a Diagnostic that a user can choose
|
||||
// to apply to their code. Usually the SuggestedFix is meant to fix the issue flagged
|
||||
// by the diagnostic.
|
||||
type SuggestedFix struct {
|
||||
// A description for this suggested fix to be shown to a user deciding
|
||||
// whether to accept it.
|
||||
Message string
|
||||
TextEdits []TextEdit
|
||||
}
|
||||
|
||||
// A TextEdit represents the replacement of the code between Pos and End with the new text.
|
||||
type TextEdit struct {
|
||||
// For a pure insertion, End can either be set to Pos or token.NoPos.
|
||||
Pos token.Pos
|
||||
End token.Pos
|
||||
NewText []byte
|
||||
}
|
|
@ -67,7 +67,7 @@ To add a new Analyzer to an existing driver, add another item to the list:
|
|||
}
|
||||
|
||||
A driver may use the name, flags, and documentation to provide on-line
|
||||
help that describes the analyses its performs.
|
||||
help that describes the analyses it performs.
|
||||
The doc comment contains a brief one-line summary,
|
||||
optionally followed by paragraphs of explanation.
|
||||
The vet command, shown below, is an example of a driver that runs
|
||||
|
@ -169,7 +169,7 @@ type information, and source positions for a single package of Go code.
|
|||
|
||||
The OtherFiles field provides the names, but not the contents, of non-Go
|
||||
files such as assembly that are part of this package. See the "asmdecl"
|
||||
or "buildtags" analyzers for examples of loading non-Go files and report
|
||||
or "buildtags" analyzers for examples of loading non-Go files and reporting
|
||||
diagnostics against them.
|
||||
|
||||
The ResultOf field provides the results computed by the analyzers
|
||||
|
@ -231,7 +231,7 @@ understood as alternative or non-standard type systems. For example,
|
|||
vet's printf checker infers whether a function has the "printf wrapper"
|
||||
type, and it applies stricter checks to calls of such functions. In
|
||||
addition, it records which functions are printf wrappers for use by
|
||||
later analysis units to identify other printf wrappers by induction.
|
||||
later analysis passes to identify other printf wrappers by induction.
|
||||
A result such as “f is a printf wrapper” that is not interesting by
|
||||
itself but serves as a stepping stone to an interesting result (such as
|
||||
a diagnostic) is called a "fact".
|
||||
|
@ -252,9 +252,9 @@ An Analyzer that uses facts must declare their types:
|
|||
|
||||
type isWrapper struct{} // => *types.Func f “is a printf wrapper”
|
||||
|
||||
A driver program ensures that facts for a pass’s dependencies are
|
||||
generated before analyzing the pass and are responsible for propagating
|
||||
facts between from one pass to another, possibly across address spaces.
|
||||
The driver program ensures that facts for a pass’s dependencies are
|
||||
generated before analyzing the package and is responsible for propagating
|
||||
facts from one package to another, possibly across address spaces.
|
||||
Consequently, Facts must be serializable. The API requires that drivers
|
||||
use the gob encoding, an efficient, robust, self-describing binary
|
||||
protocol. A fact type may implement the GobEncoder/GobDecoder interfaces
|
||||
|
@ -288,10 +288,10 @@ not currently apply analyzers to packages of the standard library.
|
|||
Therefore, for best results, analyzer authors should not rely on
|
||||
analysis facts being available for standard packages.
|
||||
For example, although the printf checker is capable of deducing during
|
||||
analysis of the log package that log.Printf is a printf-wrapper,
|
||||
analysis of the log package that log.Printf is a printf wrapper,
|
||||
this fact is built in to the analyzer so that it correctly checks
|
||||
calls to log.Printf even when run in a driver that does not apply
|
||||
it to standard packages. We plan to remove this limitation in future.
|
||||
it to standard packages. We would like to remove this limitation in future.
|
||||
|
||||
|
||||
Testing an Analyzer
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
//
|
||||
// var Analyzer = &analysis.Analyzer{
|
||||
// ...
|
||||
// Requires: reflect.TypeOf(new(inspect.Analyzer)),
|
||||
// Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
// }
|
||||
//
|
||||
// func run(pass *analysis.Pass) (interface{}, error) {
|
||||
|
|
|
@ -9,13 +9,10 @@ import (
|
|||
// Validate reports an error if any of the analyzers are misconfigured.
|
||||
// Checks include:
|
||||
// that the name is a valid identifier;
|
||||
// that analyzer names are unique;
|
||||
// that the Requires graph is acylic;
|
||||
// that the Requires graph is acyclic;
|
||||
// that analyzer fact types are unique;
|
||||
// that each fact type is a pointer.
|
||||
func Validate(analyzers []*Analyzer) error {
|
||||
names := make(map[string]bool)
|
||||
|
||||
// Map each fact type to its sole generating analyzer.
|
||||
factTypes := make(map[reflect.Type]*Analyzer)
|
||||
|
||||
|
@ -39,10 +36,6 @@ func Validate(analyzers []*Analyzer) error {
|
|||
if !validIdent(a.Name) {
|
||||
return fmt.Errorf("invalid analyzer name %q", a)
|
||||
}
|
||||
if names[a.Name] {
|
||||
return fmt.Errorf("duplicate analyzer name %q", a)
|
||||
}
|
||||
names[a.Name] = true
|
||||
|
||||
if a.Doc == "" {
|
||||
return fmt.Errorf("analyzer %q is undocumented", a)
|
||||
|
|
|
@ -65,7 +65,7 @@ func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Conte
|
|||
//
|
||||
// The archive consists of a series of files. Each file consists of a
|
||||
// name, a decimal file size and the file contents, separated by
|
||||
// newlinews. No newline follows after the file contents.
|
||||
// newlines. No newline follows after the file contents.
|
||||
func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) {
|
||||
overlay := make(map[string][]byte)
|
||||
r := bufio.NewReader(archive)
|
||||
|
|
|
@ -100,7 +100,7 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
|
|||
// Write writes encoded type information for the specified package to out.
|
||||
// The FileSet provides file position information for named objects.
|
||||
func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
|
||||
b, err := gcimporter.BExportData(fset, pkg)
|
||||
b, err := gcimporter.IExportData(fset, pkg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -332,7 +332,7 @@ func (p *importer) pos() token.Pos {
|
|||
p.prevFile = file
|
||||
p.prevLine = line
|
||||
|
||||
return p.fake.pos(file, line)
|
||||
return p.fake.pos(file, line, 0)
|
||||
}
|
||||
|
||||
// Synthesize a token.Pos
|
||||
|
@ -341,7 +341,9 @@ type fakeFileSet struct {
|
|||
files map[string]*token.File
|
||||
}
|
||||
|
||||
func (s *fakeFileSet) pos(file string, line int) token.Pos {
|
||||
func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
|
||||
// TODO(mdempsky): Make use of column.
|
||||
|
||||
// Since we don't know the set of needed file positions, we
|
||||
// reserve maxlines positions per file.
|
||||
const maxlines = 64 * 1024
|
||||
|
@ -976,10 +978,11 @@ const (
|
|||
aliasTag
|
||||
)
|
||||
|
||||
var predeclOnce sync.Once
|
||||
var predecl []types.Type // initialized lazily
|
||||
|
||||
func predeclared() []types.Type {
|
||||
if predecl == nil {
|
||||
predeclOnce.Do(func() {
|
||||
// initialize lazily to be sure that all
|
||||
// elements have been initialized before
|
||||
predecl = []types.Type{ // basic types
|
||||
|
@ -1026,7 +1029,7 @@ func predeclared() []types.Type {
|
|||
// used internally by gc; never used by this package or in .a files
|
||||
anyType{},
|
||||
}
|
||||
}
|
||||
})
|
||||
return predecl
|
||||
}
|
||||
|
||||
|
|
|
@ -6,8 +6,6 @@
|
|||
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
|
||||
// see that file for specification of the format.
|
||||
|
||||
// +build go1.11
|
||||
|
||||
package gcimporter
|
||||
|
||||
import (
|
||||
|
@ -28,7 +26,10 @@ import (
|
|||
const iexportVersion = 0
|
||||
|
||||
// IExportData returns the binary export data for pkg.
|
||||
//
|
||||
// If no file set is provided, position info will be missing.
|
||||
// The package path of the top-level package will not be recorded,
|
||||
// so that calls to IImportData can override with a provided package path.
|
||||
func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
|
@ -48,6 +49,7 @@ func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error)
|
|||
stringIndex: map[string]uint64{},
|
||||
declIndex: map[types.Object]uint64{},
|
||||
typIndex: map[types.Type]uint64{},
|
||||
localpkg: pkg,
|
||||
}
|
||||
|
||||
for i, pt := range predeclared() {
|
||||
|
@ -73,7 +75,7 @@ func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error)
|
|||
// Append indices to data0 section.
|
||||
dataLen := uint64(p.data0.Len())
|
||||
w := p.newWriter()
|
||||
w.writeIndex(p.declIndex, pkg)
|
||||
w.writeIndex(p.declIndex)
|
||||
w.flush()
|
||||
|
||||
// Assemble header.
|
||||
|
@ -95,14 +97,14 @@ func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error)
|
|||
// we're writing out the main index, which is also read by
|
||||
// non-compiler tools and includes a complete package description
|
||||
// (i.e., name and height).
|
||||
func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types.Package) {
|
||||
func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
|
||||
// Build a map from packages to objects from that package.
|
||||
pkgObjs := map[*types.Package][]types.Object{}
|
||||
|
||||
// For the main index, make sure to include every package that
|
||||
// we reference, even if we're not exporting (or reexporting)
|
||||
// any symbols from it.
|
||||
pkgObjs[localpkg] = nil
|
||||
pkgObjs[w.p.localpkg] = nil
|
||||
for pkg := range w.p.allPkgs {
|
||||
pkgObjs[pkg] = nil
|
||||
}
|
||||
|
@ -121,12 +123,12 @@ func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types
|
|||
}
|
||||
|
||||
sort.Slice(pkgs, func(i, j int) bool {
|
||||
return pkgs[i].Path() < pkgs[j].Path()
|
||||
return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j])
|
||||
})
|
||||
|
||||
w.uint64(uint64(len(pkgs)))
|
||||
for _, pkg := range pkgs {
|
||||
w.string(pkg.Path())
|
||||
w.string(w.exportPath(pkg))
|
||||
w.string(pkg.Name())
|
||||
w.uint64(uint64(0)) // package height is not needed for go/types
|
||||
|
||||
|
@ -143,6 +145,8 @@ type iexporter struct {
|
|||
fset *token.FileSet
|
||||
out *bytes.Buffer
|
||||
|
||||
localpkg *types.Package
|
||||
|
||||
// allPkgs tracks all packages that have been referenced by
|
||||
// the export data, so we can ensure to include them in the
|
||||
// main index.
|
||||
|
@ -195,6 +199,13 @@ type exportWriter struct {
|
|||
prevLine int64
|
||||
}
|
||||
|
||||
func (w *exportWriter) exportPath(pkg *types.Package) string {
|
||||
if pkg == w.p.localpkg {
|
||||
return ""
|
||||
}
|
||||
return pkg.Path()
|
||||
}
|
||||
|
||||
func (p *iexporter) doDecl(obj types.Object) {
|
||||
w := p.newWriter()
|
||||
w.setPkg(obj.Pkg(), false)
|
||||
|
@ -267,6 +278,11 @@ func (w *exportWriter) tag(tag byte) {
|
|||
}
|
||||
|
||||
func (w *exportWriter) pos(pos token.Pos) {
|
||||
if w.p.fset == nil {
|
||||
w.int64(0)
|
||||
return
|
||||
}
|
||||
|
||||
p := w.p.fset.Position(pos)
|
||||
file := p.Filename
|
||||
line := int64(p.Line)
|
||||
|
@ -299,7 +315,7 @@ func (w *exportWriter) pkg(pkg *types.Package) {
|
|||
// Ensure any referenced packages are declared in the main index.
|
||||
w.p.allPkgs[pkg] = true
|
||||
|
||||
w.string(pkg.Path())
|
||||
w.string(w.exportPath(pkg))
|
||||
}
|
||||
|
||||
func (w *exportWriter) qualifiedIdent(obj types.Object) {
|
||||
|
@ -394,7 +410,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
|
|||
w.pos(f.Pos())
|
||||
w.string(f.Name())
|
||||
w.typ(f.Type(), pkg)
|
||||
w.bool(f.Embedded())
|
||||
w.bool(f.Anonymous())
|
||||
w.string(t.Tag(i)) // note (or tag)
|
||||
}
|
||||
|
||||
|
|
|
@ -63,8 +63,8 @@ const (
|
|||
// If the export data version is not recognized or the format is otherwise
|
||||
// compromised, an error is returned.
|
||||
func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
|
||||
const currentVersion = 0
|
||||
version := -1
|
||||
const currentVersion = 1
|
||||
version := int64(-1)
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
if version > currentVersion {
|
||||
|
@ -77,9 +77,9 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []
|
|||
|
||||
r := &intReader{bytes.NewReader(data), path}
|
||||
|
||||
version = int(r.uint64())
|
||||
version = int64(r.uint64())
|
||||
switch version {
|
||||
case currentVersion:
|
||||
case currentVersion, 0:
|
||||
default:
|
||||
errorf("unknown iexport format version %d", version)
|
||||
}
|
||||
|
@ -93,7 +93,8 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []
|
|||
r.Seek(sLen+dLen, io.SeekCurrent)
|
||||
|
||||
p := iimporter{
|
||||
ipath: path,
|
||||
ipath: path,
|
||||
version: int(version),
|
||||
|
||||
stringData: stringData,
|
||||
stringCache: make(map[uint64]string),
|
||||
|
@ -142,20 +143,18 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []
|
|||
p.pkgIndex[pkg] = nameIndex
|
||||
pkgList[i] = pkg
|
||||
}
|
||||
var localpkg *types.Package
|
||||
for _, pkg := range pkgList {
|
||||
if pkg.Path() == path {
|
||||
localpkg = pkg
|
||||
}
|
||||
if len(pkgList) == 0 {
|
||||
errorf("no packages found for %s", path)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
names := make([]string, 0, len(p.pkgIndex[localpkg]))
|
||||
for name := range p.pkgIndex[localpkg] {
|
||||
p.ipkg = pkgList[0]
|
||||
names := make([]string, 0, len(p.pkgIndex[p.ipkg]))
|
||||
for name := range p.pkgIndex[p.ipkg] {
|
||||
names = append(names, name)
|
||||
}
|
||||
sort.Strings(names)
|
||||
for _, name := range names {
|
||||
p.doDecl(localpkg, name)
|
||||
p.doDecl(p.ipkg, name)
|
||||
}
|
||||
|
||||
for _, typ := range p.interfaceList {
|
||||
|
@ -165,17 +164,19 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []
|
|||
// record all referenced packages as imports
|
||||
list := append(([]*types.Package)(nil), pkgList[1:]...)
|
||||
sort.Sort(byPath(list))
|
||||
localpkg.SetImports(list)
|
||||
p.ipkg.SetImports(list)
|
||||
|
||||
// package was imported completely and without errors
|
||||
localpkg.MarkComplete()
|
||||
p.ipkg.MarkComplete()
|
||||
|
||||
consumed, _ := r.Seek(0, io.SeekCurrent)
|
||||
return int(consumed), localpkg, nil
|
||||
return int(consumed), p.ipkg, nil
|
||||
}
|
||||
|
||||
type iimporter struct {
|
||||
ipath string
|
||||
ipath string
|
||||
ipkg *types.Package
|
||||
version int
|
||||
|
||||
stringData []byte
|
||||
stringCache map[uint64]string
|
||||
|
@ -226,6 +227,9 @@ func (p *iimporter) pkgAt(off uint64) *types.Package {
|
|||
return pkg
|
||||
}
|
||||
path := p.stringAt(off)
|
||||
if path == p.ipath {
|
||||
return p.ipkg
|
||||
}
|
||||
errorf("missing package %q in %q", path, p.ipath)
|
||||
return nil
|
||||
}
|
||||
|
@ -255,6 +259,7 @@ type importReader struct {
|
|||
currPkg *types.Package
|
||||
prevFile string
|
||||
prevLine int64
|
||||
prevColumn int64
|
||||
}
|
||||
|
||||
func (r *importReader) obj(name string) {
|
||||
|
@ -448,6 +453,19 @@ func (r *importReader) qualifiedIdent() (*types.Package, string) {
|
|||
}
|
||||
|
||||
func (r *importReader) pos() token.Pos {
|
||||
if r.p.version >= 1 {
|
||||
r.posv1()
|
||||
} else {
|
||||
r.posv0()
|
||||
}
|
||||
|
||||
if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
|
||||
return token.NoPos
|
||||
}
|
||||
return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
|
||||
}
|
||||
|
||||
func (r *importReader) posv0() {
|
||||
delta := r.int64()
|
||||
if delta != deltaNewFile {
|
||||
r.prevLine += delta
|
||||
|
@ -457,12 +475,18 @@ func (r *importReader) pos() token.Pos {
|
|||
r.prevFile = r.string()
|
||||
r.prevLine = l
|
||||
}
|
||||
}
|
||||
|
||||
if r.prevFile == "" && r.prevLine == 0 {
|
||||
return token.NoPos
|
||||
func (r *importReader) posv1() {
|
||||
delta := r.int64()
|
||||
r.prevColumn += delta >> 1
|
||||
if delta&1 != 0 {
|
||||
delta = r.int64()
|
||||
r.prevLine += delta >> 1
|
||||
if delta&1 != 0 {
|
||||
r.prevFile = r.string()
|
||||
}
|
||||
}
|
||||
|
||||
return r.p.fake.pos(r.prevFile, int(r.prevLine))
|
||||
}
|
||||
|
||||
func (r *importReader) typ() types.Type {
|
||||
|
|
|
@ -81,21 +81,35 @@ func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, u
|
|||
args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"}
|
||||
args = append(args, buildFlags...)
|
||||
args = append(args, "--", "unsafe")
|
||||
stdout, err := InvokeGo(ctx, env, dir, usesExportData, args...)
|
||||
stdout, stderr, err := invokeGo(ctx, env, dir, usesExportData, args...)
|
||||
var goarch, compiler string
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if strings.Contains(err.Error(), "cannot find main module") {
|
||||
// User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc.
|
||||
// TODO(matloob): Is this a problem in practice?
|
||||
envout, _, enverr := invokeGo(ctx, env, dir, usesExportData, "env", "GOARCH")
|
||||
if enverr != nil {
|
||||
return nil, err
|
||||
}
|
||||
goarch = strings.TrimSpace(envout.String())
|
||||
compiler = "gc"
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
fields := strings.Fields(stdout.String())
|
||||
if len(fields) < 2 {
|
||||
return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\" from stdout of go command:\n%s\ndir: %s\nstdout: <<%s>>\nstderr: <<%s>>",
|
||||
cmdDebugStr(env, args...), dir, stdout.String(), stderr.String())
|
||||
}
|
||||
goarch = fields[0]
|
||||
compiler = fields[1]
|
||||
}
|
||||
fields := strings.Fields(stdout.String())
|
||||
if len(fields) < 2 {
|
||||
return nil, fmt.Errorf("could not determine GOARCH and Go compiler")
|
||||
}
|
||||
goarch := fields[0]
|
||||
compiler := fields[1]
|
||||
return types.SizesFor(compiler, goarch), nil
|
||||
}
|
||||
|
||||
// InvokeGo returns the stdout of a go command invocation.
|
||||
func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, error) {
|
||||
// invokeGo returns the stdout and stderr of a go command invocation.
|
||||
func invokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, *bytes.Buffer, error) {
|
||||
if debug {
|
||||
defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now())
|
||||
}
|
||||
|
@ -118,7 +132,7 @@ func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool
|
|||
// Catastrophic error:
|
||||
// - executable not found
|
||||
// - context cancellation
|
||||
return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
|
||||
return nil, nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
|
||||
}
|
||||
|
||||
// Export mode entails a build.
|
||||
|
@ -126,7 +140,7 @@ func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool
|
|||
// (despite the -e flag) and the Export field is blank.
|
||||
// Do not fail in that case.
|
||||
if !usesExportData {
|
||||
return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
|
||||
return nil, nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -145,7 +159,7 @@ func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool
|
|||
fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout)
|
||||
}
|
||||
|
||||
return stdout, nil
|
||||
return stdout, stderr, nil
|
||||
}
|
||||
|
||||
func cmdDebugStr(envlist []string, args ...string) string {
|
||||
|
|
|
@ -7,6 +7,7 @@ go_library(
|
|||
"external.go",
|
||||
"golist.go",
|
||||
"golist_overlay.go",
|
||||
"loadmode_string.go",
|
||||
"packages.go",
|
||||
"visit.go",
|
||||
],
|
||||
|
@ -18,5 +19,6 @@ go_library(
|
|||
"//vendor/golang.org/x/tools/go/internal/packagesdriver:go_default_library",
|
||||
"//vendor/golang.org/x/tools/internal/gopathwalk:go_default_library",
|
||||
"//vendor/golang.org/x/tools/internal/semver:go_default_library",
|
||||
"//vendor/golang.org/x/tools/internal/span:go_default_library",
|
||||
],
|
||||
)
|
||||
|
|
|
@ -12,18 +12,34 @@ import (
|
|||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Driver
|
||||
// The Driver Protocol
|
||||
//
|
||||
// The driver, given the inputs to a call to Load, returns metadata about the packages specified.
|
||||
// This allows for different build systems to support go/packages by telling go/packages how the
|
||||
// packages' source is organized.
|
||||
// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in
|
||||
// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package
|
||||
// documentation in doc.go for the full description of the patterns that need to be supported.
|
||||
// A driver receives as a JSON-serialized driverRequest struct in standard input and will
|
||||
// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output.
|
||||
|
||||
// driverRequest is used to provide the portion of Load's Config that is needed by a driver.
|
||||
type driverRequest struct {
|
||||
Command string `json:"command"`
|
||||
Mode LoadMode `json:"mode"`
|
||||
Env []string `json:"env"`
|
||||
BuildFlags []string `json:"build_flags"`
|
||||
Tests bool `json:"tests"`
|
||||
Overlay map[string][]byte `json:"overlay"`
|
||||
Mode LoadMode `json:"mode"`
|
||||
// Env specifies the environment the underlying build system should be run in.
|
||||
Env []string `json:"env"`
|
||||
// BuildFlags are flags that should be passed to the underlying build system.
|
||||
BuildFlags []string `json:"build_flags"`
|
||||
// Tests specifies whether the patterns should also return test packages.
|
||||
Tests bool `json:"tests"`
|
||||
// Overlay maps file paths (relative to the driver's working directory) to the byte contents
|
||||
// of overlay files.
|
||||
Overlay map[string][]byte `json:"overlay"`
|
||||
}
|
||||
|
||||
// findExternalDriver returns the file path of a tool that supplies
|
||||
|
@ -61,12 +77,17 @@ func findExternalDriver(cfg *Config) driver {
|
|||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
stderr := new(bytes.Buffer)
|
||||
cmd := exec.CommandContext(cfg.Context, tool, words...)
|
||||
cmd.Dir = cfg.Dir
|
||||
cmd.Env = cfg.Env
|
||||
cmd.Stdin = bytes.NewReader(req)
|
||||
cmd.Stdout = buf
|
||||
cmd.Stderr = new(bytes.Buffer)
|
||||
cmd.Stderr = stderr
|
||||
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr)
|
||||
}
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
|
||||
}
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
|
@ -20,10 +21,12 @@ import (
|
|||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/tools/go/internal/packagesdriver"
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
"golang.org/x/tools/internal/semver"
|
||||
"golang.org/x/tools/internal/span"
|
||||
)
|
||||
|
||||
// debug controls verbose logging.
|
||||
|
@ -71,6 +74,28 @@ func (r *responseDeduper) addRoot(id string) {
|
|||
r.dr.Roots = append(r.dr.Roots, id)
|
||||
}
|
||||
|
||||
// goInfo contains global information from the go tool.
|
||||
type goInfo struct {
|
||||
rootDirs map[string]string
|
||||
env goEnv
|
||||
}
|
||||
|
||||
type goEnv struct {
|
||||
modulesOn bool
|
||||
}
|
||||
|
||||
func determineEnv(cfg *Config) goEnv {
|
||||
buf, err := invokeGo(cfg, "env", "GOMOD")
|
||||
if err != nil {
|
||||
return goEnv{}
|
||||
}
|
||||
gomod := bytes.TrimSpace(buf.Bytes())
|
||||
|
||||
env := goEnv{}
|
||||
env.modulesOn = len(gomod) > 0
|
||||
return env
|
||||
}
|
||||
|
||||
// goListDriver uses the go list command to interpret the patterns and produce
|
||||
// the build system package structure.
|
||||
// See driver for more details.
|
||||
|
@ -85,6 +110,33 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
|
|||
sizeswg.Done()
|
||||
}()
|
||||
}
|
||||
defer sizeswg.Wait()
|
||||
|
||||
// start fetching rootDirs
|
||||
var info goInfo
|
||||
var rootDirsReady, envReady = make(chan struct{}), make(chan struct{})
|
||||
go func() {
|
||||
info.rootDirs = determineRootDirs(cfg)
|
||||
close(rootDirsReady)
|
||||
}()
|
||||
go func() {
|
||||
info.env = determineEnv(cfg)
|
||||
close(envReady)
|
||||
}()
|
||||
getGoInfo := func() *goInfo {
|
||||
<-rootDirsReady
|
||||
<-envReady
|
||||
return &info
|
||||
}
|
||||
|
||||
// Ensure that we don't leak goroutines: Load is synchronous, so callers will
|
||||
// not expect it to access the fields of cfg after the call returns.
|
||||
defer getGoInfo()
|
||||
|
||||
// always pass getGoInfo to golistDriver
|
||||
golistDriver := func(cfg *Config, patterns ...string) (*driverResponse, error) {
|
||||
return golistDriver(cfg, getGoInfo, patterns...)
|
||||
}
|
||||
|
||||
// Determine files requested in contains patterns
|
||||
var containFiles []string
|
||||
|
@ -147,7 +199,7 @@ extractQueries:
|
|||
var containsCandidates []string
|
||||
|
||||
if len(containFiles) != 0 {
|
||||
if err := runContainsQueries(cfg, golistDriver, response, containFiles); err != nil {
|
||||
if err := runContainsQueries(cfg, golistDriver, response, containFiles, getGoInfo); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
@ -158,7 +210,7 @@ extractQueries:
|
|||
}
|
||||
}
|
||||
|
||||
modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response)
|
||||
modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -166,7 +218,7 @@ extractQueries:
|
|||
containsCandidates = append(containsCandidates, modifiedPkgs...)
|
||||
containsCandidates = append(containsCandidates, needPkgs...)
|
||||
}
|
||||
if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs); err != nil {
|
||||
if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs, getGoInfo); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Check candidate packages for containFiles.
|
||||
|
@ -198,26 +250,30 @@ extractQueries:
|
|||
return response.dr, nil
|
||||
}
|
||||
|
||||
func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string) error {
|
||||
func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string, getGoInfo func() *goInfo) error {
|
||||
if len(pkgs) == 0 {
|
||||
return nil
|
||||
}
|
||||
dr, err := driver(cfg, pkgs...)
|
||||
drivercfg := *cfg
|
||||
if getGoInfo().env.modulesOn {
|
||||
drivercfg.BuildFlags = append(drivercfg.BuildFlags, "-mod=readonly")
|
||||
}
|
||||
dr, err := driver(&drivercfg, pkgs...)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, pkg := range dr.Packages {
|
||||
response.addPackage(pkg)
|
||||
}
|
||||
_, needPkgs, err := processGolistOverlay(cfg, response)
|
||||
_, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
addNeededOverlayPackages(cfg, driver, response, needPkgs)
|
||||
return nil
|
||||
return addNeededOverlayPackages(cfg, driver, response, needPkgs, getGoInfo)
|
||||
}
|
||||
|
||||
func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error {
|
||||
func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string, goInfo func() *goInfo) error {
|
||||
for _, query := range queries {
|
||||
// TODO(matloob): Do only one query per directory.
|
||||
fdir := filepath.Dir(query)
|
||||
|
@ -229,12 +285,41 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q
|
|||
}
|
||||
dirResponse, err := driver(cfg, pattern)
|
||||
if err != nil {
|
||||
// Couldn't find a package for the directory. Try to load the file as an ad-hoc package.
|
||||
var queryErr error
|
||||
dirResponse, err = driver(cfg, query)
|
||||
if queryErr != nil {
|
||||
// Return the original error if the attempt to fall back failed.
|
||||
return err
|
||||
if dirResponse, queryErr = adHocPackage(cfg, driver, pattern, query); queryErr != nil {
|
||||
return err // return the original error
|
||||
}
|
||||
}
|
||||
// `go list` can report errors for files that are not listed as part of a package's GoFiles.
|
||||
// In the case of an invalid Go file, we should assume that it is part of package if only
|
||||
// one package is in the response. The file may have valid contents in an overlay.
|
||||
if len(dirResponse.Packages) == 1 {
|
||||
pkg := dirResponse.Packages[0]
|
||||
for i, err := range pkg.Errors {
|
||||
s := errorSpan(err)
|
||||
if !s.IsValid() {
|
||||
break
|
||||
}
|
||||
if len(pkg.CompiledGoFiles) == 0 {
|
||||
break
|
||||
}
|
||||
dir := filepath.Dir(pkg.CompiledGoFiles[0])
|
||||
filename := filepath.Join(dir, filepath.Base(s.URI().Filename()))
|
||||
if info, err := os.Stat(filename); err != nil || info.IsDir() {
|
||||
break
|
||||
}
|
||||
if !contains(pkg.CompiledGoFiles, filename) {
|
||||
pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename)
|
||||
pkg.GoFiles = append(pkg.GoFiles, filename)
|
||||
pkg.Errors = append(pkg.Errors[:i], pkg.Errors[i+1:]...)
|
||||
}
|
||||
}
|
||||
}
|
||||
// A final attempt to construct an ad-hoc package.
|
||||
if len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1 {
|
||||
var queryErr error
|
||||
if dirResponse, queryErr = adHocPackage(cfg, driver, pattern, query); queryErr != nil {
|
||||
return err // return the original error
|
||||
}
|
||||
}
|
||||
isRoot := make(map[string]bool, len(dirResponse.Roots))
|
||||
|
@ -262,6 +347,75 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q
|
|||
return nil
|
||||
}
|
||||
|
||||
// adHocPackage attempts to construct an ad-hoc package given a query that failed.
|
||||
func adHocPackage(cfg *Config, driver driver, pattern, query string) (*driverResponse, error) {
|
||||
// There was an error loading the package. Try to load the file as an ad-hoc package.
|
||||
// Usually the error will appear in a returned package, but may not if we're in modules mode
|
||||
// and the ad-hoc is located outside a module.
|
||||
dirResponse, err := driver(cfg, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If we get nothing back from `go list`, try to make this file into its own ad-hoc package.
|
||||
if len(dirResponse.Packages) == 0 && err == nil {
|
||||
dirResponse.Packages = append(dirResponse.Packages, &Package{
|
||||
ID: "command-line-arguments",
|
||||
PkgPath: query,
|
||||
GoFiles: []string{query},
|
||||
CompiledGoFiles: []string{query},
|
||||
Imports: make(map[string]*Package),
|
||||
})
|
||||
dirResponse.Roots = append(dirResponse.Roots, "command-line-arguments")
|
||||
}
|
||||
// Special case to handle issue #33482:
|
||||
// If this is a file= query for ad-hoc packages where the file only exists on an overlay,
|
||||
// and exists outside of a module, add the file in for the package.
|
||||
if len(dirResponse.Packages) == 1 && (dirResponse.Packages[0].ID == "command-line-arguments" ||
|
||||
filepath.ToSlash(dirResponse.Packages[0].PkgPath) == filepath.ToSlash(query)) {
|
||||
if len(dirResponse.Packages[0].GoFiles) == 0 {
|
||||
filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath
|
||||
// TODO(matloob): check if the file is outside of a root dir?
|
||||
for path := range cfg.Overlay {
|
||||
if path == filename {
|
||||
dirResponse.Packages[0].Errors = nil
|
||||
dirResponse.Packages[0].GoFiles = []string{path}
|
||||
dirResponse.Packages[0].CompiledGoFiles = []string{path}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return dirResponse, nil
|
||||
}
|
||||
|
||||
func contains(files []string, filename string) bool {
|
||||
for _, f := range files {
|
||||
if f == filename {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// errorSpan attempts to parse a standard `go list` error message
|
||||
// by stripping off the trailing error message.
|
||||
//
|
||||
// It works only on errors whose message is prefixed by colon,
|
||||
// followed by a space (": "). For example:
|
||||
//
|
||||
// attributes.go:13:1: expected 'package', found 'type'
|
||||
//
|
||||
func errorSpan(err Error) span.Span {
|
||||
if err.Pos == "" {
|
||||
input := strings.TrimSpace(err.Msg)
|
||||
msgIndex := strings.Index(input, ": ")
|
||||
if msgIndex < 0 {
|
||||
return span.Parse(input)
|
||||
}
|
||||
return span.Parse(input[:msgIndex])
|
||||
}
|
||||
return span.Parse(err.Pos)
|
||||
}
|
||||
|
||||
// modCacheRegexp splits a path in a module cache into module, module version, and package.
|
||||
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
|
||||
|
||||
|
@ -312,9 +466,7 @@ func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, quer
|
|||
|
||||
startWalk := time.Now()
|
||||
gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug})
|
||||
if debug {
|
||||
log.Printf("%v for walk", time.Since(startWalk))
|
||||
}
|
||||
cfg.Logf("%v for walk", time.Since(startWalk))
|
||||
|
||||
// Weird special case: the top-level package in a module will be in
|
||||
// whatever directory the user checked the repository out into. It's
|
||||
|
@ -327,6 +479,10 @@ func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, quer
|
|||
}
|
||||
|
||||
files, err := ioutil.ReadDir(modRoot)
|
||||
if err != nil {
|
||||
panic(err) // See above.
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
if strings.HasSuffix(f.Name(), ".go") {
|
||||
simpleMatches = append(simpleMatches, rel)
|
||||
|
@ -394,7 +550,7 @@ func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, quer
|
|||
// We're only trying to look at stuff in the module cache, so
|
||||
// disable the network. This should speed things up, and has
|
||||
// prevented errors in at least one case, #28518.
|
||||
tmpCfg.Env = append(append([]string{"GOPROXY=off"}, cfg.Env...))
|
||||
tmpCfg.Env = append([]string{"GOPROXY=off"}, cfg.Env...)
|
||||
|
||||
var err error
|
||||
tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery")
|
||||
|
@ -442,17 +598,29 @@ func roots(cfg *Config) ([]gopathwalk.Root, string, error) {
|
|||
|
||||
var roots []gopathwalk.Root
|
||||
// Always add GOROOT.
|
||||
roots = append(roots, gopathwalk.Root{filepath.Join(goroot, "/src"), gopathwalk.RootGOROOT})
|
||||
roots = append(roots, gopathwalk.Root{
|
||||
Path: filepath.Join(goroot, "/src"),
|
||||
Type: gopathwalk.RootGOROOT,
|
||||
})
|
||||
// If modules are enabled, scan the module dir.
|
||||
if modDir != "" {
|
||||
roots = append(roots, gopathwalk.Root{modDir, gopathwalk.RootCurrentModule})
|
||||
roots = append(roots, gopathwalk.Root{
|
||||
Path: modDir,
|
||||
Type: gopathwalk.RootCurrentModule,
|
||||
})
|
||||
}
|
||||
// Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode.
|
||||
for _, p := range gopath {
|
||||
if modDir != "" {
|
||||
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache})
|
||||
roots = append(roots, gopathwalk.Root{
|
||||
Path: filepath.Join(p, "/pkg/mod"),
|
||||
Type: gopathwalk.RootModuleCache,
|
||||
})
|
||||
} else {
|
||||
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/src"), gopathwalk.RootGOPATH})
|
||||
roots = append(roots, gopathwalk.Root{
|
||||
Path: filepath.Join(p, "/src"),
|
||||
Type: gopathwalk.RootGOPATH,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -565,13 +733,13 @@ func otherFiles(p *jsonPackage) [][]string {
|
|||
// golistDriver uses the "go list" command to expand the pattern
|
||||
// words and return metadata for the specified packages. dir may be
|
||||
// "" and env may be nil, as per os/exec.Command.
|
||||
func golistDriver(cfg *Config, words ...string) (*driverResponse, error) {
|
||||
func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driverResponse, error) {
|
||||
// go list uses the following identifiers in ImportPath and Imports:
|
||||
//
|
||||
// "p" -- importable package or main (command)
|
||||
// "q.test" -- q's test executable
|
||||
// "q.test" -- q's test executable
|
||||
// "p [q.test]" -- variant of p as built for q's test executable
|
||||
// "q_test [q.test]" -- q's external test package
|
||||
// "q_test [q.test]" -- q's external test package
|
||||
//
|
||||
// The packages p that are built differently for a test q.test
|
||||
// are q itself, plus any helpers used by the external test q_test,
|
||||
|
@ -606,6 +774,20 @@ func golistDriver(cfg *Config, words ...string) (*driverResponse, error) {
|
|||
return nil, fmt.Errorf("package missing import path: %+v", p)
|
||||
}
|
||||
|
||||
// Work around https://golang.org/issue/33157:
|
||||
// go list -e, when given an absolute path, will find the package contained at
|
||||
// that directory. But when no package exists there, it will return a fake package
|
||||
// with an error and the ImportPath set to the absolute path provided to go list.
|
||||
// Try to convert that absolute path to what its package path would be if it's
|
||||
// contained in a known module or GOPATH entry. This will allow the package to be
|
||||
// properly "reclaimed" when overlays are processed.
|
||||
if filepath.IsAbs(p.ImportPath) && p.Error != nil {
|
||||
pkgPath, ok := getPkgPath(cfg, p.ImportPath, rootsDirs)
|
||||
if ok {
|
||||
p.ImportPath = pkgPath
|
||||
}
|
||||
}
|
||||
|
||||
if old, found := seen[p.ImportPath]; found {
|
||||
if !reflect.DeepEqual(p, old) {
|
||||
return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath)
|
||||
|
@ -709,6 +891,44 @@ func golistDriver(cfg *Config, words ...string) (*driverResponse, error) {
|
|||
return &response, nil
|
||||
}
|
||||
|
||||
// getPkgPath finds the package path of a directory if it's relative to a root directory.
|
||||
func getPkgPath(cfg *Config, dir string, goInfo func() *goInfo) (string, bool) {
|
||||
absDir, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
cfg.Logf("error getting absolute path of %s: %v", dir, err)
|
||||
return "", false
|
||||
}
|
||||
for rdir, rpath := range goInfo().rootDirs {
|
||||
absRdir, err := filepath.Abs(rdir)
|
||||
if err != nil {
|
||||
cfg.Logf("error getting absolute path of %s: %v", rdir, err)
|
||||
continue
|
||||
}
|
||||
// Make sure that the directory is in the module,
|
||||
// to avoid creating a path relative to another module.
|
||||
if !strings.HasPrefix(absDir, absRdir) {
|
||||
cfg.Logf("%s does not have prefix %s", absDir, absRdir)
|
||||
continue
|
||||
}
|
||||
// TODO(matloob): This doesn't properly handle symlinks.
|
||||
r, err := filepath.Rel(rdir, dir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if rpath != "" {
|
||||
// We choose only one root even though the directory even it can belong in multiple modules
|
||||
// or GOPATH entries. This is okay because we only need to work with absolute dirs when a
|
||||
// file is missing from disk, for instance when gopls calls go/packages in an overlay.
|
||||
// Once the file is saved, gopls, or the next invocation of the tool will get the correct
|
||||
// result straight from golist.
|
||||
// TODO(matloob): Implement module tiebreaking?
|
||||
return path.Join(rpath, filepath.ToSlash(r)), true
|
||||
}
|
||||
return filepath.ToSlash(r), true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// absJoin absolutizes and flattens the lists of files.
|
||||
func absJoin(dir string, fileses ...[]string) (res []string) {
|
||||
for _, files := range fileses {
|
||||
|
@ -729,7 +949,7 @@ func golistargs(cfg *Config, words []string) []string {
|
|||
fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0),
|
||||
fmt.Sprintf("-test=%t", cfg.Tests),
|
||||
fmt.Sprintf("-export=%t", usesExportData(cfg)),
|
||||
fmt.Sprintf("-deps=%t", cfg.Mode&NeedDeps != 0),
|
||||
fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0),
|
||||
// go list doesn't let you pass -test and -find together,
|
||||
// probably because you'd just get the TestMain.
|
||||
fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0),
|
||||
|
@ -755,11 +975,9 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
|
|||
cmd.Dir = cfg.Dir
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stderr
|
||||
if debug {
|
||||
defer func(start time.Time) {
|
||||
log.Printf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr)
|
||||
}(time.Now())
|
||||
}
|
||||
defer func(start time.Time) {
|
||||
cfg.Logf("%s for %v, stderr: <<%s>> stdout: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr, stdout)
|
||||
}(time.Now())
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
// Check for 'go' executable not being found.
|
||||
|
@ -779,6 +997,30 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
|
|||
return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)}
|
||||
}
|
||||
|
||||
// Related to #24854
|
||||
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") {
|
||||
return nil, fmt.Errorf("%s", stderr.String())
|
||||
}
|
||||
|
||||
// Is there an error running the C compiler in cgo? This will be reported in the "Error" field
|
||||
// and should be suppressed by go list -e.
|
||||
//
|
||||
// This condition is not perfect yet because the error message can include other error messages than runtime/cgo.
|
||||
isPkgPathRune := func(r rune) bool {
|
||||
// From https://golang.org/ref/spec#Import_declarations:
|
||||
// Implementation restriction: A compiler may restrict ImportPaths to non-empty strings
|
||||
// using only characters belonging to Unicode's L, M, N, P, and S general categories
|
||||
// (the Graphic characters without spaces) and may also exclude the
|
||||
// characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD.
|
||||
return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) &&
|
||||
!strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r)
|
||||
}
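// A cgo/C compiler failure shows up on stderr as a line of the form "# <package path>"
// followed by the compiler output; if the first stderr line has that shape, the go list
// output on stdout is still usable, so return it.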
|
||||
if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") {
|
||||
if strings.HasPrefix(strings.TrimLeftFunc(stderr.String()[len("# "):], isPkgPathRune), "\n") {
|
||||
return stdout, nil
|
||||
}
|
||||
}
|
||||
|
||||
// This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show
|
||||
// the error in the Err section of stdout in case -e option is provided.
|
||||
// This fix is provided for backwards compatibility.
|
||||
|
@ -788,17 +1030,58 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
|
|||
return bytes.NewBufferString(output), nil
|
||||
}
|
||||
|
||||
// Similar to the previous error, but currently lacks a fix in Go.
|
||||
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") {
|
||||
output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
|
||||
strings.Trim(stderr.String(), "\n"))
|
||||
return bytes.NewBufferString(output), nil
|
||||
}
|
||||
|
||||
// Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath.
|
||||
// If the package doesn't exist, put the absolute path of the directory into the error message,
|
||||
// as Go 1.13 list does.
|
||||
const noSuchDirectory = "no such directory"
|
||||
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) {
|
||||
errstr := stderr.String()
|
||||
abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):])
|
||||
output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
|
||||
abspath, strings.Trim(stderr.String(), "\n"))
|
||||
return bytes.NewBufferString(output), nil
|
||||
}
|
||||
|
||||
// Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist.
// Note that the error message we look for in this case is different from the one looked for above.
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") {
|
||||
output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
|
||||
strings.Trim(stderr.String(), "\n"))
|
||||
return bytes.NewBufferString(output), nil
|
||||
}
|
||||
|
||||
// Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a
|
||||
// directory outside any module.
|
||||
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") {
|
||||
output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
|
||||
// TODO(matloob): command-line-arguments isn't correct here.
|
||||
"command-line-arguments", strings.Trim(stderr.String(), "\n"))
|
||||
return bytes.NewBufferString(output), nil
|
||||
}
|
||||
|
||||
// Another variation of the previous error
|
||||
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") {
|
||||
output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
|
||||
// TODO(matloob): command-line-arguments isn't correct here.
|
||||
"command-line-arguments", strings.Trim(stderr.String(), "\n"))
|
||||
return bytes.NewBufferString(output), nil
|
||||
}
|
||||
|
||||
// Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit
|
||||
// status if there's a dependency on a package that doesn't exist. But it should return
|
||||
// a zero exit status and set an error on that package.
|
||||
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") {
|
||||
// Don't clobber stdout if `go list` actually returned something.
|
||||
if len(stdout.String()) > 0 {
|
||||
return stdout, nil
|
||||
}
|
||||
// try to extract package name from string
|
||||
stderrStr := stderr.String()
|
||||
var importPath string
|
||||
|
@ -832,12 +1115,6 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
|
|||
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr)
|
||||
}
|
||||
|
||||
// debugging
|
||||
if false {
|
||||
fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(cmd, args...), stdout)
|
||||
}
|
||||
|
||||
return stdout, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -6,11 +6,9 @@ import (
|
|||
"fmt"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// processGolistOverlay provides rudimentary support for adding
|
||||
|
@ -18,7 +16,7 @@ import (
|
|||
// sometimes incorrect.
|
||||
// TODO(matloob): Handle unsupported cases, including the following:
|
||||
// - determining the correct package to add given a new import path
|
||||
func processGolistOverlay(cfg *Config, response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) {
|
||||
func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func() *goInfo) (modifiedPkgs, needPkgs []string, err error) {
|
||||
havePkgs := make(map[string]string) // importPath -> non-test package ID
|
||||
needPkgsSet := make(map[string]bool)
|
||||
modifiedPkgsSet := make(map[string]bool)
|
||||
|
@ -29,16 +27,20 @@ func processGolistOverlay(cfg *Config, response *responseDeduper) (modifiedPkgs,
|
|||
havePkgs[pkg.PkgPath] = pkg.ID
|
||||
}
|
||||
|
||||
var rootDirs map[string]string
|
||||
var onceGetRootDirs sync.Once
|
||||
// If no new imports are added, it is safe to avoid loading any needPkgs.
|
||||
// Otherwise, it's hard to tell which package is actually being loaded
|
||||
// (due to vendoring) and whether any modified package will show up
|
||||
// in the transitive set of dependencies (because new imports are added,
|
||||
// potentially modifying the transitive set of dependencies).
|
||||
var overlayAddsImports bool
|
||||
|
||||
for opath, contents := range cfg.Overlay {
|
||||
base := filepath.Base(opath)
|
||||
dir := filepath.Dir(opath)
|
||||
var pkg *Package
|
||||
var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant
|
||||
var testVariantOf *Package // if opath is a test file, this is the package it is testing
|
||||
var fileExists bool
|
||||
isTest := strings.HasSuffix(opath, "_test.go")
|
||||
isTestFile := strings.HasSuffix(opath, "_test.go")
|
||||
pkgName, ok := extractPackageName(opath, contents)
|
||||
if !ok {
|
||||
// Don't bother adding a file that doesn't even have a parsable package statement
|
||||
|
@ -47,20 +49,33 @@ func processGolistOverlay(cfg *Config, response *responseDeduper) (modifiedPkgs,
|
|||
}
|
||||
nextPackage:
|
||||
for _, p := range response.dr.Packages {
|
||||
if pkgName != p.Name {
|
||||
if pkgName != p.Name && p.ID != "command-line-arguments" {
|
||||
continue
|
||||
}
|
||||
for _, f := range p.GoFiles {
|
||||
if !sameFile(filepath.Dir(f), dir) {
|
||||
continue
|
||||
}
|
||||
if isTest && !hasTestFiles(p) {
|
||||
// Make sure to capture information on the package's test variant, if needed.
|
||||
if isTestFile && !hasTestFiles(p) {
|
||||
// TODO(matloob): Are there packages other than the 'production' variant
|
||||
// of a package that this can match? This shouldn't match the test main package
|
||||
// because the file is generated in another directory.
|
||||
testVariantOf = p
|
||||
continue nextPackage
|
||||
}
|
||||
if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath {
|
||||
// If we've already seen the test variant,
|
||||
// make sure to label which package it is a test variant of.
|
||||
if hasTestFiles(pkg) {
|
||||
testVariantOf = p
|
||||
continue nextPackage
|
||||
}
|
||||
// If we have already seen the package of which this is a test variant.
|
||||
if hasTestFiles(p) {
|
||||
testVariantOf = pkg
|
||||
}
|
||||
}
|
||||
pkg = p
|
||||
if filepath.Base(f) == base {
|
||||
fileExists = true
|
||||
|
@ -69,37 +84,18 @@ func processGolistOverlay(cfg *Config, response *responseDeduper) (modifiedPkgs,
|
|||
}
|
||||
// The overlay could have included an entirely new package.
|
||||
if pkg == nil {
|
||||
onceGetRootDirs.Do(func() {
|
||||
rootDirs = determineRootDirs(cfg)
|
||||
})
|
||||
// Try to find the module or gopath dir the file is contained in.
|
||||
// Then for modules, add the module opath to the beginning.
|
||||
var pkgPath string
|
||||
for rdir, rpath := range rootDirs {
|
||||
// TODO(matloob): This doesn't properly handle symlinks.
|
||||
r, err := filepath.Rel(rdir, dir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
pkgPath = filepath.ToSlash(r)
|
||||
if rpath != "" {
|
||||
pkgPath = path.Join(rpath, pkgPath)
|
||||
}
|
||||
// We only create one new package even it can belong in multiple modules or GOPATH entries.
|
||||
// This is okay because tools (such as the LSP) that use overlays will recompute the overlay
|
||||
// once the file is saved, and golist will do the right thing.
|
||||
// TODO(matloob): Implement module tiebreaking?
|
||||
pkgPath, ok := getPkgPath(cfg, dir, rootDirs)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
if pkgPath == "" {
|
||||
continue
|
||||
}
|
||||
isXTest := strings.HasSuffix(pkgName, "_test")
|
||||
if isXTest {
|
||||
pkgPath += "_test"
|
||||
}
|
||||
id := pkgPath
|
||||
if isTest && !isXTest {
|
||||
if isTestFile && !isXTest {
|
||||
id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath)
|
||||
}
|
||||
// Try to reclaim a package with the same id if it exists in the response.
|
||||
|
@ -115,7 +111,7 @@ func processGolistOverlay(cfg *Config, response *responseDeduper) (modifiedPkgs,
|
|||
response.addPackage(pkg)
|
||||
havePkgs[pkg.PkgPath] = id
|
||||
// Add the production package's sources for a test variant.
|
||||
if isTest && !isXTest && testVariantOf != nil {
|
||||
if isTestFile && !isXTest && testVariantOf != nil {
|
||||
pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...)
|
||||
pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...)
|
||||
}
|
||||
|
@ -136,13 +132,18 @@ func processGolistOverlay(cfg *Config, response *responseDeduper) (modifiedPkgs,
|
|||
for _, imp := range imports {
|
||||
_, found := pkg.Imports[imp]
|
||||
if !found {
|
||||
overlayAddsImports = true
|
||||
// TODO(matloob): Handle cases when the following block isn't correct.
|
||||
// These include imports of test variants, imports of vendored packages, etc.
|
||||
// These include imports of vendored packages, etc.
|
||||
id, ok := havePkgs[imp]
|
||||
if !ok {
|
||||
id = imp
|
||||
}
|
||||
pkg.Imports[imp] = &Package{ID: id}
// Add dependencies to the non-test variant version of this package as well.
if testVariantOf != nil {
testVariantOf.Imports[imp] = &Package{ID: id}
|
||||
}
|
||||
}
|
||||
}
|
||||
continue
|
||||
|
@ -171,9 +172,11 @@ func processGolistOverlay(cfg *Config, response *responseDeduper) (modifiedPkgs,
|
|||
}
|
||||
}
|
||||
|
||||
needPkgs = make([]string, 0, len(needPkgsSet))
|
||||
for pkg := range needPkgsSet {
|
||||
needPkgs = append(needPkgs, pkg)
|
||||
if overlayAddsImports {
|
||||
needPkgs = make([]string, 0, len(needPkgsSet))
|
||||
for pkg := range needPkgsSet {
|
||||
needPkgs = append(needPkgs, pkg)
|
||||
}
|
||||
}
|
||||
modifiedPkgs = make([]string, 0, len(modifiedPkgsSet))
|
||||
for pkg := range modifiedPkgsSet {
|
||||
|
|
|
@ -0,0 +1,57 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package packages
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var allModes = []LoadMode{
|
||||
NeedName,
|
||||
NeedFiles,
|
||||
NeedCompiledGoFiles,
|
||||
NeedImports,
|
||||
NeedDeps,
|
||||
NeedExportsFile,
|
||||
NeedTypes,
|
||||
NeedSyntax,
|
||||
NeedTypesInfo,
|
||||
NeedTypesSizes,
|
||||
}
|
||||
|
||||
var modeStrings = []string{
|
||||
"NeedName",
|
||||
"NeedFiles",
|
||||
"NeedCompiledGoFiles",
|
||||
"NeedImports",
|
||||
"NeedDeps",
|
||||
"NeedExportsFile",
|
||||
"NeedTypes",
|
||||
"NeedSyntax",
|
||||
"NeedTypesInfo",
|
||||
"NeedTypesSizes",
|
||||
}
|
||||
|
||||
func (mod LoadMode) String() string {
m := mod
if m == 0 {
return fmt.Sprintf("LoadMode(0)")
}
var out []string
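// Decompose the mode bitmask into its named flags, clearing each bit as it is matched;
// any bits left over afterwards have no known name and are reported as "Unknown".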
for i, x := range allModes {
if x > m {
break
}
if (m & x) != 0 {
out = append(out, modeStrings[i])
m = m ^ x
}
}
if m != 0 {
out = append(out, "Unknown")
}
return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|"))
}
|
|
@ -48,8 +48,7 @@ const (
|
|||
// "placeholder" Packages with only the ID set.
|
||||
NeedImports
|
||||
|
||||
// NeedDeps adds the fields requested by the LoadMode in the packages in Imports. If NeedImports
|
||||
// is not set NeedDeps has no effect.
|
||||
// NeedDeps adds the fields requested by the LoadMode in the packages in Imports.
|
||||
NeedDeps
|
||||
|
||||
// NeedExportsFile adds ExportsFile.
|
||||
|
@ -75,7 +74,7 @@ const (
|
|||
|
||||
// Deprecated: LoadImports exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
LoadImports = LoadFiles | NeedImports | NeedDeps
|
||||
LoadImports = LoadFiles | NeedImports
|
||||
|
||||
// Deprecated: LoadTypes exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
|
@ -87,7 +86,7 @@ const (
|
|||
|
||||
// Deprecated: LoadAllSyntax exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
LoadAllSyntax = LoadSyntax
|
||||
LoadAllSyntax = LoadSyntax | NeedDeps
|
||||
)
|
||||
|
||||
// A Config specifies details about how packages should be loaded.
|
||||
|
@ -103,6 +102,12 @@ type Config struct {
|
|||
// If Context is nil, the load cannot be cancelled.
|
||||
Context context.Context
|
||||
|
||||
// Logf is the logger for the config.
|
||||
// If the user provides a logger, debug logging is enabled.
|
||||
// If the GOPACKAGESDEBUG environment variable is set to true,
|
||||
// but the logger is nil, default to log.Printf.
|
||||
Logf func(format string, args ...interface{})
|
||||
|
||||
// Dir is the directory in which to run the build system's query tool
|
||||
// that provides information about the packages.
|
||||
// If Dir is empty, the tool is run in the current directory.
|
||||
|
@ -410,11 +415,13 @@ type loader struct {
|
|||
parseCacheMu sync.Mutex
|
||||
exportMu sync.Mutex // enforces mutual exclusion of exportdata operations
|
||||
|
||||
// TODO(matloob): Add an implied mode here and use that instead of mode.
|
||||
// Implied mode would contain all the fields we need the data for so we can
|
||||
// get the actually requested fields. We'll zero them out before returning
|
||||
// packages to the user. This will make it easier for us to get the conditions
|
||||
// where we need certain modes right.
|
||||
// Config.Mode contains the implied mode (see impliedLoadMode).
|
||||
// Implied mode contains all the fields we need the data for.
|
||||
// In requestedMode there are the actually requested fields.
|
||||
// We'll zero them out before returning packages to the user.
|
||||
// This makes it easier for us to get the conditions where
|
||||
// we need certain modes right.
|
||||
requestedMode LoadMode
|
||||
}
|
||||
|
||||
type parseValue struct {
|
||||
|
@ -429,6 +436,17 @@ func newLoader(cfg *Config) *loader {
|
|||
}
|
||||
if cfg != nil {
|
||||
ld.Config = *cfg
|
||||
// If the user has provided a logger, use it.
|
||||
ld.Config.Logf = cfg.Logf
|
||||
}
|
||||
if ld.Config.Logf == nil {
|
||||
// If the GOPACKAGESDEBUG environment variable is set to true,
|
||||
// but the user has not provided a logger, default to log.Printf.
|
||||
if debug {
|
||||
ld.Config.Logf = log.Printf
|
||||
} else {
|
||||
ld.Config.Logf = func(format string, args ...interface{}) {}
|
||||
}
|
||||
}
|
||||
if ld.Config.Mode == 0 {
|
||||
ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility.
|
||||
|
@ -445,7 +463,11 @@ func newLoader(cfg *Config) *loader {
|
|||
}
|
||||
}
|
||||
|
||||
if ld.Mode&NeedTypes != 0 {
|
||||
// Save the actually requested fields. We'll zero them out before returning packages to the user.
|
||||
ld.requestedMode = ld.Mode
|
||||
ld.Mode = impliedLoadMode(ld.Mode)
|
||||
|
||||
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
|
||||
if ld.Fset == nil {
|
||||
ld.Fset = token.NewFileSet()
|
||||
}
|
||||
|
@ -459,6 +481,7 @@ func newLoader(cfg *Config) *loader {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ld
|
||||
}
|
||||
|
||||
|
@ -479,8 +502,8 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
|||
}
|
||||
lpkg := &loaderPackage{
|
||||
Package: pkg,
|
||||
needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0,
|
||||
needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0 ||
|
||||
needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0,
|
||||
needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0 ||
|
||||
len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files
|
||||
pkg.ExportFile == "" && pkg.PkgPath != "unsafe",
|
||||
}
|
||||
|
@ -527,28 +550,31 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
|||
lpkg.color = grey
|
||||
stack = append(stack, lpkg) // push
|
||||
stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
|
||||
lpkg.Imports = make(map[string]*Package, len(stubs))
|
||||
for importPath, ipkg := range stubs {
|
||||
var importErr error
|
||||
imp := ld.pkgs[ipkg.ID]
|
||||
if imp == nil {
|
||||
// (includes package "C" when DisableCgo)
|
||||
importErr = fmt.Errorf("missing package: %q", ipkg.ID)
|
||||
} else if imp.color == grey {
|
||||
importErr = fmt.Errorf("import cycle: %s", stack)
|
||||
}
|
||||
if importErr != nil {
|
||||
if lpkg.importErrors == nil {
|
||||
lpkg.importErrors = make(map[string]error)
|
||||
// If NeedImports isn't set, the imports fields will all be zeroed out.
|
||||
if ld.Mode&NeedImports != 0 {
|
||||
lpkg.Imports = make(map[string]*Package, len(stubs))
|
||||
for importPath, ipkg := range stubs {
|
||||
var importErr error
|
||||
imp := ld.pkgs[ipkg.ID]
|
||||
if imp == nil {
|
||||
// (includes package "C" when DisableCgo)
|
||||
importErr = fmt.Errorf("missing package: %q", ipkg.ID)
|
||||
} else if imp.color == grey {
|
||||
importErr = fmt.Errorf("import cycle: %s", stack)
|
||||
}
|
||||
if importErr != nil {
|
||||
if lpkg.importErrors == nil {
|
||||
lpkg.importErrors = make(map[string]error)
|
||||
}
|
||||
lpkg.importErrors[importPath] = importErr
|
||||
continue
|
||||
}
|
||||
lpkg.importErrors[importPath] = importErr
|
||||
continue
|
||||
}
|
||||
|
||||
if visit(imp) {
|
||||
lpkg.needsrc = true
|
||||
if visit(imp) {
|
||||
lpkg.needsrc = true
|
||||
}
|
||||
lpkg.Imports[importPath] = imp.Package
|
||||
}
|
||||
lpkg.Imports[importPath] = imp.Package
|
||||
}
|
||||
if lpkg.needsrc {
|
||||
srcPkgs = append(srcPkgs, lpkg)
|
||||
|
@ -562,7 +588,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
|||
return lpkg.needsrc
|
||||
}
|
||||
|
||||
if ld.Mode&(NeedImports|NeedDeps) == 0 {
|
||||
if ld.Mode&NeedImports == 0 {
|
||||
// We do this to drop the stub import packages that we are not even going to try to resolve.
|
||||
for _, lpkg := range initial {
|
||||
lpkg.Imports = nil
|
||||
|
@ -573,7 +599,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
|||
visit(lpkg)
|
||||
}
|
||||
}
|
||||
if ld.Mode&NeedDeps != 0 { // TODO(matloob): This is only the case if NeedTypes is also set, right?
|
||||
if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 {
|
||||
for _, lpkg := range srcPkgs {
|
||||
// Complete type information is required for the
|
||||
// immediate dependencies of each source package.
|
||||
|
@ -583,9 +609,9 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
|||
}
|
||||
}
|
||||
}
|
||||
// Load type data if needed, starting at
|
||||
// Load type data and syntax if needed, starting at
|
||||
// the initial packages (roots of the import DAG).
|
||||
if ld.Mode&NeedTypes != 0 {
|
||||
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
|
||||
var wg sync.WaitGroup
|
||||
for _, lpkg := range initial {
|
||||
wg.Add(1)
|
||||
|
@ -598,54 +624,44 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
|||
}
|
||||
|
||||
result := make([]*Package, len(initial))
|
||||
importPlaceholders := make(map[string]*Package)
|
||||
for i, lpkg := range initial {
|
||||
result[i] = lpkg.Package
|
||||
}
|
||||
for i := range ld.pkgs {
|
||||
// Clear all unrequested fields, for extra de-Hyrum-ization.
|
||||
if ld.Mode&NeedName == 0 {
|
||||
if ld.requestedMode&NeedName == 0 {
|
||||
ld.pkgs[i].Name = ""
|
||||
ld.pkgs[i].PkgPath = ""
|
||||
}
|
||||
if ld.Mode&NeedFiles == 0 {
|
||||
if ld.requestedMode&NeedFiles == 0 {
|
||||
ld.pkgs[i].GoFiles = nil
|
||||
ld.pkgs[i].OtherFiles = nil
|
||||
}
|
||||
if ld.Mode&NeedCompiledGoFiles == 0 {
|
||||
if ld.requestedMode&NeedCompiledGoFiles == 0 {
|
||||
ld.pkgs[i].CompiledGoFiles = nil
|
||||
}
|
||||
if ld.Mode&NeedImports == 0 {
|
||||
if ld.requestedMode&NeedImports == 0 {
|
||||
ld.pkgs[i].Imports = nil
|
||||
}
|
||||
if ld.Mode&NeedExportsFile == 0 {
|
||||
if ld.requestedMode&NeedExportsFile == 0 {
|
||||
ld.pkgs[i].ExportFile = ""
|
||||
}
|
||||
if ld.Mode&NeedTypes == 0 {
|
||||
if ld.requestedMode&NeedTypes == 0 {
|
||||
ld.pkgs[i].Types = nil
|
||||
ld.pkgs[i].Fset = nil
|
||||
ld.pkgs[i].IllTyped = false
|
||||
}
|
||||
if ld.Mode&NeedSyntax == 0 {
|
||||
if ld.requestedMode&NeedSyntax == 0 {
|
||||
ld.pkgs[i].Syntax = nil
|
||||
}
|
||||
if ld.Mode&NeedTypesInfo == 0 {
|
||||
if ld.requestedMode&NeedTypesInfo == 0 {
|
||||
ld.pkgs[i].TypesInfo = nil
|
||||
}
|
||||
if ld.Mode&NeedTypesSizes == 0 {
|
||||
if ld.requestedMode&NeedTypesSizes == 0 {
|
||||
ld.pkgs[i].TypesSizes = nil
|
||||
}
|
||||
if ld.Mode&NeedDeps == 0 {
|
||||
for j, pkg := range ld.pkgs[i].Imports {
|
||||
ph, ok := importPlaceholders[pkg.ID]
|
||||
if !ok {
|
||||
ph = &Package{ID: pkg.ID}
|
||||
importPlaceholders[pkg.ID] = ph
|
||||
}
|
||||
ld.pkgs[i].Imports[j] = ph
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
|
@ -666,7 +682,6 @@ func (ld *loader) loadRecursive(lpkg *loaderPackage) {
|
|||
}(imp)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
ld.loadPackage(lpkg)
|
||||
})
|
||||
}
|
||||
|
@ -755,12 +770,23 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
|||
lpkg.Errors = append(lpkg.Errors, errs...)
|
||||
}
|
||||
|
||||
if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" {
|
||||
// The config requested loading sources and types, but sources are missing.
|
||||
// Add an error to the package and fall back to loading from export data.
|
||||
appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError})
|
||||
ld.loadFromExportData(lpkg)
|
||||
return // can't get syntax trees for this package
|
||||
}
|
||||
|
||||
files, errs := ld.parseFiles(lpkg.CompiledGoFiles)
|
||||
for _, err := range errs {
|
||||
appendError(err)
|
||||
}
|
||||
|
||||
lpkg.Syntax = files
|
||||
if ld.Config.Mode&NeedTypes == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
lpkg.TypesInfo = &types.Info{
|
||||
Types: make(map[ast.Expr]types.TypeAndValue),
|
||||
|
@ -793,7 +819,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
|||
if ipkg.Types != nil && ipkg.Types.Complete() {
|
||||
return ipkg.Types, nil
|
||||
}
|
||||
log.Fatalf("internal error: nil Pkg importing %q from %q", path, lpkg)
|
||||
log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg)
|
||||
panic("unreachable")
|
||||
})
|
||||
|
||||
|
@ -804,7 +830,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
|||
// Type-check bodies of functions only in non-initial packages.
|
||||
// Example: for import graph A->B->C and initial packages {A,C},
|
||||
// we can ignore function bodies in B.
|
||||
IgnoreFuncBodies: (ld.Mode&(NeedDeps|NeedTypesInfo) == 0) && !lpkg.initial,
|
||||
IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial,
|
||||
|
||||
Error: appendError,
|
||||
Sizes: ld.sizes,
|
||||
|
@ -1066,6 +1092,25 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
|
|||
return tpkg, nil
|
||||
}
|
||||
|
||||
func usesExportData(cfg *Config) bool {
|
||||
return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedTypesInfo == 0
|
||||
// impliedLoadMode returns loadMode with its dependencies.
|
||||
func impliedLoadMode(loadMode LoadMode) LoadMode {
|
||||
if loadMode&NeedTypesInfo != 0 && loadMode&NeedImports == 0 {
|
||||
// If NeedTypesInfo, go/packages needs to do typechecking itself so it can
|
||||
// associate type info with the AST. To do so, we need the export data
|
||||
// for dependencies, which means we need to ask for the direct dependencies.
|
||||
// NeedImports is used to ask for the direct dependencies.
|
||||
loadMode |= NeedImports
|
||||
}
|
||||
|
||||
if loadMode&NeedDeps != 0 && loadMode&NeedImports == 0 {
|
||||
// With NeedDeps we need to load at least direct dependencies.
|
||||
// NeedImports is used to ask for the direct dependencies.
|
||||
loadMode |= NeedImports
|
||||
}
|
||||
|
||||
return loadMode
|
||||
}
|
||||
|
||||
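// usesExportData reports whether the driver should request export data from go list:
// either the caller asked for it explicitly, or types are needed without loading full
// dependency information.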
func usesExportData(cfg *Config) bool {
|
||||
return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
|
||||
}
|
||||
|
|
|
@ -376,7 +376,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
|
|||
return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj)
|
||||
}
|
||||
|
||||
// abtraction of *types.{Pointer,Slice,Array,Chan,Map}
|
||||
// abstraction of *types.{Pointer,Slice,Array,Chan,Map}
|
||||
type hasElem interface {
|
||||
Elem() types.Type
|
||||
}
|
||||
|
|
|
@ -16,6 +16,7 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/internal/fastwalk"
|
||||
)
|
||||
|
@ -59,24 +60,38 @@ func SrcDirsRoots(ctx *build.Context) []Root {
|
|||
// paths of the containing source directory and the package directory.
|
||||
// add will be called concurrently.
|
||||
func Walk(roots []Root, add func(root Root, dir string), opts Options) {
|
||||
WalkSkip(roots, add, func(Root, string) bool { return false }, opts)
|
||||
}
|
||||
|
||||
// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
|
||||
// For each package found, add will be called (concurrently) with the absolute
|
||||
// paths of the containing source directory and the package directory.
|
||||
// For each directory that will be scanned, skip will be called (concurrently)
|
||||
// with the absolute paths of the containing source directory and the directory.
|
||||
// If skip returns false on a directory it will be processed.
|
||||
// add will be called concurrently.
|
||||
// skip will be called concurrently.
|
||||
func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) {
|
||||
for _, root := range roots {
|
||||
walkDir(root, add, opts)
|
||||
walkDir(root, add, skip, opts)
|
||||
}
|
||||
}
|
||||
|
||||
func walkDir(root Root, add func(Root, string), opts Options) {
|
||||
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) {
|
||||
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
|
||||
if opts.Debug {
|
||||
log.Printf("skipping nonexistant directory: %v", root.Path)
|
||||
log.Printf("skipping nonexistent directory: %v", root.Path)
|
||||
}
|
||||
return
|
||||
}
|
||||
start := time.Now()
|
||||
if opts.Debug {
|
||||
log.Printf("scanning %s", root.Path)
|
||||
log.Printf("gopathwalk: scanning %s", root.Path)
|
||||
}
|
||||
w := &walker{
|
||||
root: root,
|
||||
add: add,
|
||||
skip: skip,
|
||||
opts: opts,
|
||||
}
|
||||
w.init()
|
||||
|
@ -85,15 +100,16 @@ func walkDir(root Root, add func(Root, string), opts Options) {
|
|||
}
|
||||
|
||||
if opts.Debug {
|
||||
log.Printf("scanned %s", root.Path)
|
||||
log.Printf("gopathwalk: scanned %s in %v", root.Path, time.Since(start))
|
||||
}
|
||||
}
|
||||
|
||||
// walker is the callback for fastwalk.Walk.
|
||||
type walker struct {
|
||||
root Root // The source directory to scan.
|
||||
add func(Root, string) // The callback that will be invoked for every possible Go package dir.
|
||||
opts Options // Options passed to Walk by the user.
|
||||
root Root // The source directory to scan.
|
||||
add func(Root, string) // The callback that will be invoked for every possible Go package dir.
|
||||
skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true.
|
||||
opts Options // Options passed to Walk by the user.
|
||||
|
||||
ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files.
|
||||
}
|
||||
|
@ -151,12 +167,16 @@ func (w *walker) getIgnoredDirs(path string) []string {
|
|||
return ignoredDirs
|
||||
}
|
||||
|
||||
func (w *walker) shouldSkipDir(fi os.FileInfo) bool {
|
||||
func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool {
|
||||
for _, ignoredDir := range w.ignoredDirs {
|
||||
if os.SameFile(fi, ignoredDir) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if w.skip != nil {
|
||||
// Check with the user specified callback.
|
||||
return w.skip(w.root, dir)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -184,7 +204,7 @@ func (w *walker) walk(path string, typ os.FileMode) error {
|
|||
return filepath.SkipDir
|
||||
}
|
||||
fi, err := os.Lstat(path)
|
||||
if err == nil && w.shouldSkipDir(fi) {
|
||||
if err == nil && w.shouldSkipDir(fi, path) {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
|
@ -224,7 +244,7 @@ func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool {
|
|||
if !ts.IsDir() {
|
||||
return false
|
||||
}
|
||||
if w.shouldSkipDir(ts) {
|
||||
if w.shouldSkipDir(ts, dir) {
|
||||
return false
|
||||
}
|
||||
// Check for symlink loops by statting each directory component
|
||||
|
|
|
@ -0,0 +1,23 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"fix.go",
|
||||
"imports.go",
|
||||
"mod.go",
|
||||
"mod_cache.go",
|
||||
"sortimports.go",
|
||||
"zstdlib.go",
|
||||
],
|
||||
importmap = "k8s.io/kops/vendor/golang.org/x/tools/internal/imports",
|
||||
importpath = "golang.org/x/tools/internal/imports",
|
||||
visibility = ["//vendor/golang.org/x/tools:__subpackages__"],
|
||||
deps = [
|
||||
"//vendor/golang.org/x/tools/go/ast/astutil:go_default_library",
|
||||
"//vendor/golang.org/x/tools/go/packages:go_default_library",
|
||||
"//vendor/golang.org/x/tools/internal/gopathwalk:go_default_library",
|
||||
"//vendor/golang.org/x/tools/internal/module:go_default_library",
|
||||
"//vendor/golang.org/x/tools/internal/semver:go_default_library",
|
||||
],
|
||||
)
|
File diff suppressed because it is too large
|
@ -0,0 +1,397 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run mkstdlib.go
|
||||
|
||||
// Package imports implements a Go pretty-printer (like package "go/format")
|
||||
// that also adds or removes import statements as necessary.
|
||||
package imports
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/printer"
|
||||
"go/token"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/ast/astutil"
|
||||
)
|
||||
|
||||
// Options is golang.org/x/tools/imports.Options with extra internal-only options.
|
||||
type Options struct {
|
||||
Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state.
|
||||
|
||||
Fragment bool // Accept fragment of a source file (no package statement)
|
||||
AllErrors bool // Report all errors (not just the first 10 on different lines)
|
||||
|
||||
Comments bool // Print comments (true if nil *Options provided)
|
||||
TabIndent bool // Use tabs for indent (true if nil *Options provided)
|
||||
TabWidth int // Tab width (8 if nil *Options provided)
|
||||
|
||||
FormatOnly bool // Disable the insertion and deletion of imports
|
||||
}
|
||||
|
||||
// Process implements golang.org/x/tools/imports.Process with explicit context in env.
|
||||
func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) {
|
||||
src, opt, err = initialize(filename, src, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fileSet := token.NewFileSet()
|
||||
file, adjust, err := parse(fileSet, filename, src, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !opt.FormatOnly {
|
||||
if err := fixImports(fileSet, file, filename, opt.Env); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return formatFile(fileSet, file, src, adjust, opt)
|
||||
}
|
||||
|
||||
// FixImports returns a list of fixes to the imports that, when applied,
|
||||
// will leave the imports in the same state as Process.
|
||||
//
|
||||
// Note that filename's directory influences which imports can be chosen,
|
||||
// so it is important that filename be accurate.
|
||||
func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
|
||||
src, opt, err = initialize(filename, src, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fileSet := token.NewFileSet()
|
||||
file, _, err := parse(fileSet, filename, src, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return getFixes(fileSet, file, filename, opt.Env)
|
||||
}
|
||||
|
||||
// ApplyFixes applies all of the given fixes to the file and formats it.
func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options) (formatted []byte, err error) {
|
||||
src, opt, err = initialize(filename, src, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fileSet := token.NewFileSet()
|
||||
file, adjust, err := parse(fileSet, filename, src, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Apply the fixes to the file.
|
||||
apply(fileSet, file, fixes)
|
||||
|
||||
return formatFile(fileSet, file, src, adjust, opt)
|
||||
}
|
||||
|
||||
// GetAllCandidates gets all of the standard library candidate packages to import in
|
||||
// sorted order on import path.
|
||||
func GetAllCandidates(filename string, opt *Options) (pkgs []ImportFix, err error) {
|
||||
_, opt, err = initialize(filename, nil, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return getAllCandidates(filename, opt.Env)
|
||||
}
|
||||
|
||||
// GetPackageExports returns all known packages with name pkg and their exports.
|
||||
func GetPackageExports(pkg, filename string, opt *Options) (exports []PackageExport, err error) {
|
||||
_, opt, err = initialize(filename, nil, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return getPackageExports(pkg, filename, opt.Env)
|
||||
}
|
||||
|
||||
// initialize sets the values for opt and src.
|
||||
// If they are provided, they are not changed. Otherwise opt is set to the
|
||||
// default values and src is read from the file system.
|
||||
func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, error) {
|
||||
// Use defaults if opt is nil.
|
||||
if opt == nil {
|
||||
opt = &Options{Comments: true, TabIndent: true, TabWidth: 8}
|
||||
}
|
||||
|
||||
// Set the env if the user has not provided it.
|
||||
if opt.Env == nil {
|
||||
opt.Env = &ProcessEnv{
|
||||
GOPATH: build.Default.GOPATH,
|
||||
GOROOT: build.Default.GOROOT,
|
||||
}
|
||||
}
|
||||
|
||||
// Set the logger if the user has not provided it.
|
||||
if opt.Env.Logf == nil {
|
||||
opt.Env.Logf = log.Printf
|
||||
}
|
||||
|
||||
if src == nil {
|
||||
b, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
src = b
|
||||
}
|
||||
|
||||
return src, opt, nil
|
||||
}
|
||||
|
||||
func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
|
||||
mergeImports(opt.Env, fileSet, file)
|
||||
sortImports(opt.Env, fileSet, file)
|
||||
imps := astutil.Imports(fileSet, file)
|
||||
var spacesBefore []string // import paths we need spaces before
|
||||
for _, impSection := range imps {
|
||||
// Within each block of contiguous imports, see if any
|
||||
// import lines are in different group numbers. If so,
|
||||
// we'll need to put a space between them so it's
|
||||
// compatible with gofmt.
|
||||
lastGroup := -1
|
||||
for _, importSpec := range impSection {
|
||||
importPath, _ := strconv.Unquote(importSpec.Path.Value)
|
||||
groupNum := importGroup(opt.Env, importPath)
|
||||
if groupNum != lastGroup && lastGroup != -1 {
|
||||
spacesBefore = append(spacesBefore, importPath)
|
||||
}
|
||||
lastGroup = groupNum
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
printerMode := printer.UseSpaces
|
||||
if opt.TabIndent {
|
||||
printerMode |= printer.TabIndent
|
||||
}
|
||||
printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth}
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := printConfig.Fprint(&buf, fileSet, file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := buf.Bytes()
|
||||
if adjust != nil {
|
||||
out = adjust(src, out)
|
||||
}
|
||||
if len(spacesBefore) > 0 {
|
||||
out, err = addImportSpaces(bytes.NewReader(out), spacesBefore)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
out, err = format.Source(out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// parse parses src, which was read from filename,
|
||||
// as a Go source file or statement list.
|
||||
func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
|
||||
parserMode := parser.Mode(0)
|
||||
if opt.Comments {
|
||||
parserMode |= parser.ParseComments
|
||||
}
|
||||
if opt.AllErrors {
|
||||
parserMode |= parser.AllErrors
|
||||
}
|
||||
|
||||
// Try as whole source file.
|
||||
file, err := parser.ParseFile(fset, filename, src, parserMode)
|
||||
if err == nil {
|
||||
return file, nil, nil
|
||||
}
|
||||
// If the error is that the source file didn't begin with a
|
||||
// package line and we accept fragmented input, fall through to
|
||||
// try as a source fragment. Stop and return on any other error.
|
||||
if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// If this is a declaration list, make it a source file
|
||||
// by inserting a package clause.
|
||||
// Insert using a ;, not a newline, so that parse errors are on
|
||||
// the correct line.
|
||||
const prefix = "package main;"
|
||||
psrc := append([]byte(prefix), src...)
|
||||
file, err = parser.ParseFile(fset, filename, psrc, parserMode)
|
||||
if err == nil {
|
||||
// Gofmt will turn the ; into a \n.
|
||||
// Do that ourselves now and update the file contents,
|
||||
// so that positions and line numbers are correct going forward.
|
||||
psrc[len(prefix)-1] = '\n'
|
||||
fset.File(file.Package).SetLinesForContent(psrc)
|
||||
|
||||
// If a main function exists, we will assume this is a main
|
||||
// package and leave the file.
|
||||
if containsMainFunc(file) {
|
||||
return file, nil, nil
|
||||
}
|
||||
|
||||
adjust := func(orig, src []byte) []byte {
|
||||
// Remove the package clause.
|
||||
src = src[len(prefix):]
|
||||
return matchSpace(orig, src)
|
||||
}
|
||||
return file, adjust, nil
|
||||
}
|
||||
// If the error is that the source file didn't begin with a
|
||||
// declaration, fall through to try as a statement list.
|
||||
// Stop and return on any other error.
|
||||
if !strings.Contains(err.Error(), "expected declaration") {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// If this is a statement list, make it a source file
|
||||
// by inserting a package clause and turning the list
|
||||
// into a function body. This handles expressions too.
|
||||
// Insert using a ;, not a newline, so that the line numbers
|
||||
// in fsrc match the ones in src.
|
||||
fsrc := append(append([]byte("package p; func _() {"), src...), '}')
|
||||
file, err = parser.ParseFile(fset, filename, fsrc, parserMode)
|
||||
if err == nil {
|
||||
adjust := func(orig, src []byte) []byte {
|
||||
// Remove the wrapping.
|
||||
// Gofmt has turned the ; into a \n\n.
|
||||
src = src[len("package p\n\nfunc _() {"):]
|
||||
src = src[:len(src)-len("}\n")]
|
||||
// Gofmt has also indented the function body one level.
|
||||
// Remove that indent.
|
||||
src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1)
|
||||
return matchSpace(orig, src)
|
||||
}
|
||||
return file, adjust, nil
|
||||
}
|
||||
|
||||
// Failed, and out of options.
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// containsMainFunc checks if a file contains a function declaration with the
|
||||
// function signature 'func main()'
|
||||
func containsMainFunc(file *ast.File) bool {
|
||||
for _, decl := range file.Decls {
|
||||
if f, ok := decl.(*ast.FuncDecl); ok {
|
||||
if f.Name.Name != "main" {
|
||||
continue
|
||||
}
|
||||
|
||||
if len(f.Type.Params.List) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if f.Type.Results != nil && len(f.Type.Results.List) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
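// cutSpace splits b into its leading whitespace, the trimmed middle, and its trailing
// whitespace, where whitespace means spaces, tabs, and newlines.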
func cutSpace(b []byte) (before, middle, after []byte) {
|
||||
i := 0
|
||||
for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') {
|
||||
i++
|
||||
}
|
||||
j := len(b)
|
||||
for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') {
|
||||
j--
|
||||
}
|
||||
if i <= j {
|
||||
return b[:i], b[i:j], b[j:]
|
||||
}
|
||||
return nil, nil, b[j:]
|
||||
}
|
||||
|
||||
// matchSpace reformats src to use the same space context as orig.
|
||||
// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src.
|
||||
// 2) matchSpace copies the indentation of the first non-blank line in orig
|
||||
// to every non-blank line in src.
|
||||
// 3) matchSpace copies the trailing space from orig and uses it in place
|
||||
// of src's trailing space.
|
||||
func matchSpace(orig []byte, src []byte) []byte {
|
||||
before, _, after := cutSpace(orig)
|
||||
i := bytes.LastIndex(before, []byte{'\n'})
|
||||
before, indent := before[:i+1], before[i+1:]
|
||||
|
||||
_, src, _ = cutSpace(src)
|
||||
|
||||
var b bytes.Buffer
|
||||
b.Write(before)
|
||||
for len(src) > 0 {
|
||||
line := src
|
||||
if i := bytes.IndexByte(line, '\n'); i >= 0 {
|
||||
line, src = line[:i+1], line[i+1:]
|
||||
} else {
|
||||
src = nil
|
||||
}
|
||||
if len(line) > 0 && line[0] != '\n' { // not blank
|
||||
b.Write(indent)
|
||||
}
|
||||
b.Write(line)
|
||||
}
|
||||
b.Write(after)
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`)
|
||||
|
||||
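// addImportSpaces copies the source read from r, writing a blank line before each import
// path listed in breaks so that import groups stay separated the way gofmt would print them.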
func addImportSpaces(r io.Reader, breaks []string) ([]byte, error) {
|
||||
var out bytes.Buffer
|
||||
in := bufio.NewReader(r)
|
||||
inImports := false
|
||||
done := false
|
||||
for {
|
||||
s, err := in.ReadString('\n')
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !inImports && !done && strings.HasPrefix(s, "import") {
|
||||
inImports = true
|
||||
}
|
||||
if inImports && (strings.HasPrefix(s, "var") ||
|
||||
strings.HasPrefix(s, "func") ||
|
||||
strings.HasPrefix(s, "const") ||
|
||||
strings.HasPrefix(s, "type")) {
|
||||
done = true
|
||||
inImports = false
|
||||
}
|
||||
if inImports && len(breaks) > 0 {
|
||||
if m := impLine.FindStringSubmatch(s); m != nil {
|
||||
if m[1] == breaks[0] {
|
||||
out.WriteByte('\n')
|
||||
breaks = breaks[1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprint(&out, s)
|
||||
}
|
||||
return out.Bytes(), nil
|
||||
}
|
|
@ -0,0 +1,643 @@
|
|||
package imports
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
"golang.org/x/tools/internal/module"
|
||||
"golang.org/x/tools/internal/semver"
|
||||
)
|
||||
|
||||
// ModuleResolver implements resolver for modules using the go command as little
|
||||
// as feasible.
|
||||
type ModuleResolver struct {
|
||||
env *ProcessEnv
|
||||
moduleCacheDir string
|
||||
dummyVendorMod *ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory.
|
||||
|
||||
Initialized bool
|
||||
Main *ModuleJSON
|
||||
ModsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path...
|
||||
ModsByDir []*ModuleJSON // ...or Dir.
|
||||
|
||||
// moduleCacheCache stores information about the module cache.
|
||||
moduleCacheCache *dirInfoCache
|
||||
otherCache *dirInfoCache
|
||||
}
|
||||
|
||||
type ModuleJSON struct {
|
||||
Path string // module path
|
||||
Replace *ModuleJSON // replaced by this module
|
||||
Main bool // is this the main module?
|
||||
Dir string // directory holding files for this module, if any
|
||||
GoMod string // path to go.mod file for this module, if any
|
||||
GoVersion string // go version used in module
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) init() error {
|
||||
if r.Initialized {
|
||||
return nil
|
||||
}
|
||||
mainMod, vendorEnabled, err := vendorEnabled(r.env)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if mainMod != nil && vendorEnabled {
|
||||
// Vendor mode is on, so all the non-Main modules are irrelevant,
|
||||
// and we need to search /vendor for everything.
|
||||
r.Main = mainMod
|
||||
r.dummyVendorMod = &ModuleJSON{
|
||||
Path: "",
|
||||
Dir: filepath.Join(mainMod.Dir, "vendor"),
|
||||
}
|
||||
r.ModsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod}
|
||||
r.ModsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod}
|
||||
} else {
|
||||
// Vendor mode is off, so run go list -m ... to find everything.
|
||||
r.initAllMods()
|
||||
}
|
||||
|
||||
r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod")
|
||||
|
||||
sort.Slice(r.ModsByModPath, func(i, j int) bool {
|
||||
count := func(x int) int {
|
||||
return strings.Count(r.ModsByModPath[x].Path, "/")
|
||||
}
|
||||
return count(j) < count(i) // descending order
|
||||
})
|
||||
sort.Slice(r.ModsByDir, func(i, j int) bool {
|
||||
count := func(x int) int {
|
||||
return strings.Count(r.ModsByDir[x].Dir, "/")
|
||||
}
|
||||
return count(j) < count(i) // descending order
|
||||
})
|
||||
|
||||
if r.moduleCacheCache == nil {
|
||||
r.moduleCacheCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
}
|
||||
}
|
||||
if r.otherCache == nil {
|
||||
r.otherCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
}
|
||||
}
|
||||
r.Initialized = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) initAllMods() error {
|
||||
stdout, err := r.env.invokeGo("list", "-m", "-json", "...")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for dec := json.NewDecoder(stdout); dec.More(); {
|
||||
mod := &ModuleJSON{}
|
||||
if err := dec.Decode(mod); err != nil {
|
||||
return err
|
||||
}
|
||||
if mod.Dir == "" {
|
||||
if r.env.Debug {
|
||||
r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path)
|
||||
}
|
||||
// Can't do anything with a module that's not downloaded.
|
||||
continue
|
||||
}
|
||||
r.ModsByModPath = append(r.ModsByModPath, mod)
|
||||
r.ModsByDir = append(r.ModsByDir, mod)
|
||||
if mod.Main {
|
||||
r.Main = mod
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) ClearForNewScan() {
|
||||
r.otherCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) ClearForNewMod() {
|
||||
env := r.env
|
||||
*r = ModuleResolver{
|
||||
env: env,
|
||||
}
|
||||
r.init()
|
||||
}
|
||||
|
||||
// findPackage returns the module and directory that contains the package at
|
||||
// the given import path, or returns nil, "" if no module is in scope.
|
||||
func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) {
|
||||
// This can't find packages in the stdlib, but that's harmless for all
|
||||
// the existing code paths.
|
||||
for _, m := range r.ModsByModPath {
|
||||
if !strings.HasPrefix(importPath, m.Path) {
|
||||
continue
|
||||
}
|
||||
pathInModule := importPath[len(m.Path):]
|
||||
pkgDir := filepath.Join(m.Dir, pathInModule)
|
||||
if r.dirIsNestedModule(pkgDir, m) {
|
||||
continue
|
||||
}
|
||||
|
||||
if info, ok := r.cacheLoad(pkgDir); ok {
|
||||
if loaded, err := info.reachedStatus(nameLoaded); loaded {
|
||||
if err != nil {
|
||||
continue // No package in this dir.
|
||||
}
|
||||
return m, pkgDir
|
||||
}
|
||||
if scanned, err := info.reachedStatus(directoryScanned); scanned && err != nil {
|
||||
continue // Dir is unreadable, etc.
|
||||
}
|
||||
// This is slightly wrong: a directory doesn't have to have an
|
||||
// importable package to count as a package for package-to-module
|
||||
// resolution. package main or _test files should count but
|
||||
// don't.
|
||||
// TODO(heschi): fix this.
|
||||
if _, err := r.cachePackageName(info); err == nil {
|
||||
return m, pkgDir
|
||||
}
|
||||
}
|
||||
|
||||
// Not cached. Read the filesystem.
|
||||
pkgFiles, err := ioutil.ReadDir(pkgDir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// A module only contains a package if it has buildable go
|
||||
// files in that directory. If not, it could be provided by an
|
||||
// outer module. See #29736.
|
||||
for _, fi := range pkgFiles {
|
||||
if ok, _ := r.env.buildContext().MatchFile(pkgDir, fi.Name()); ok {
|
||||
return m, pkgDir
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, ""
|
||||
}
|
||||
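// Illustrative sketch (editor's addition, not part of the upstream file):
// with a module whose Path is "example.com/m" and whose Dir is "/home/u/m"
// in scope, a call such as
//
//	mod, dir := r.findPackage("example.com/m/internal/util")
//
// returns that module and dir == "/home/u/m/internal/util", provided the
// directory exists and contains buildable Go files. The module path and
// directories here are hypothetical.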
|
||||
func (r *ModuleResolver) cacheLoad(dir string) (directoryPackageInfo, bool) {
|
||||
if info, ok := r.moduleCacheCache.Load(dir); ok {
|
||||
return info, ok
|
||||
}
|
||||
return r.otherCache.Load(dir)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) cacheStore(info directoryPackageInfo) {
|
||||
if info.rootType == gopathwalk.RootModuleCache {
|
||||
r.moduleCacheCache.Store(info.dir, info)
|
||||
} else {
|
||||
r.otherCache.Store(info.dir, info)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) cacheKeys() []string {
|
||||
return append(r.moduleCacheCache.Keys(), r.otherCache.Keys()...)
|
||||
}
|
||||
|
||||
// cachePackageName caches the package name for a dir already in the cache.
|
||||
func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) {
|
||||
if info.rootType == gopathwalk.RootModuleCache {
|
||||
return r.moduleCacheCache.CachePackageName(info)
|
||||
}
|
||||
return r.otherCache.CachePackageName(info)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
|
||||
if info.rootType == gopathwalk.RootModuleCache {
|
||||
return r.moduleCacheCache.CacheExports(ctx, env, info)
|
||||
}
|
||||
return r.otherCache.CacheExports(ctx, env, info)
|
||||
}
|
||||
|
||||
// findModuleByDir returns the module that contains dir, or nil if no such
|
||||
// module is in scope.
|
||||
func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON {
|
||||
// This is quite tricky and may not be correct. dir could be:
|
||||
// - a package in the main module.
|
||||
// - a replace target underneath the main module's directory.
|
||||
// - a nested module in the above.
|
||||
// - a replace target somewhere totally random.
|
||||
// - a nested module in the above.
|
||||
// - in the mod cache.
|
||||
// - in /vendor/ in -mod=vendor mode.
|
||||
// - nested module? Dunno.
|
||||
// Rumor has it that replace targets cannot contain other replace targets.
|
||||
for _, m := range r.ModsByDir {
|
||||
if !strings.HasPrefix(dir, m.Dir) {
|
||||
continue
|
||||
}
|
||||
|
||||
if r.dirIsNestedModule(dir, m) {
|
||||
continue
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// dirIsNestedModule reports if dir is contained in a nested module underneath
|
||||
// mod, not actually in mod.
|
||||
func (r *ModuleResolver) dirIsNestedModule(dir string, mod *ModuleJSON) bool {
|
||||
if !strings.HasPrefix(dir, mod.Dir) {
|
||||
return false
|
||||
}
|
||||
if r.dirInModuleCache(dir) {
|
||||
// Nested modules in the module cache are pruned,
|
||||
// so it cannot be a nested module.
|
||||
return false
|
||||
}
|
||||
if mod != nil && mod == r.dummyVendorMod {
|
||||
// The /vendor pseudomodule is flattened and doesn't actually count.
|
||||
return false
|
||||
}
|
||||
modDir, _ := r.modInfo(dir)
|
||||
if modDir == "" {
|
||||
return false
|
||||
}
|
||||
return modDir != mod.Dir
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) {
|
||||
readModName := func(modFile string) string {
|
||||
modBytes, err := ioutil.ReadFile(modFile)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return modulePath(modBytes)
|
||||
}
|
||||
|
||||
if r.dirInModuleCache(dir) {
|
||||
matches := modCacheRegexp.FindStringSubmatch(dir)
|
||||
index := strings.Index(dir, matches[1]+"@"+matches[2])
|
||||
modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2])
|
||||
return modDir, readModName(filepath.Join(modDir, "go.mod"))
|
||||
}
|
||||
for {
|
||||
if info, ok := r.cacheLoad(dir); ok {
|
||||
return info.moduleDir, info.moduleName
|
||||
}
|
||||
f := filepath.Join(dir, "go.mod")
|
||||
info, err := os.Stat(f)
|
||||
if err == nil && !info.IsDir() {
|
||||
return dir, readModName(f)
|
||||
}
|
||||
|
||||
d := filepath.Dir(dir)
|
||||
if len(d) >= len(dir) {
|
||||
return "", "" // reached top of file system, no go.mod
|
||||
}
|
||||
dir = d
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) dirInModuleCache(dir string) bool {
|
||||
if r.moduleCacheDir == "" {
|
||||
return false
|
||||
}
|
||||
return strings.HasPrefix(dir, r.moduleCacheDir)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
names := map[string]string{}
|
||||
for _, path := range importPaths {
|
||||
_, packageDir := r.findPackage(path)
|
||||
if packageDir == "" {
|
||||
continue
|
||||
}
|
||||
name, err := packageDirToName(packageDir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
names[path] = name
|
||||
}
|
||||
return names, nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Walk GOROOT, GOPATH/pkg/mod, and the main module.
|
||||
roots := []gopathwalk.Root{
|
||||
{filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT},
|
||||
}
|
||||
if r.Main != nil {
|
||||
roots = append(roots, gopathwalk.Root{r.Main.Dir, gopathwalk.RootCurrentModule})
|
||||
}
|
||||
if r.dummyVendorMod != nil {
|
||||
roots = append(roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther})
|
||||
} else {
|
||||
roots = append(roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache})
|
||||
// Walk replace targets, just in case they're not in any of the above.
|
||||
for _, mod := range r.ModsByModPath {
|
||||
if mod.Replace != nil {
|
||||
roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
roots = filterRoots(roots, exclude)
|
||||
|
||||
var result []*pkg
|
||||
var mu sync.Mutex
|
||||
|
||||
// We assume cached directories have not changed. We can skip them and their
|
||||
// children.
|
||||
skip := func(root gopathwalk.Root, dir string) bool {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
info, ok := r.cacheLoad(dir)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
// This directory can be skipped as long as we have already scanned it.
|
||||
// Packages with errors will continue to have errors, so there is no need
|
||||
// to rescan them.
|
||||
packageScanned, _ := info.reachedStatus(directoryScanned)
|
||||
return packageScanned
|
||||
}
|
||||
|
||||
// Add anything new to the cache. We'll process everything in it below.
|
||||
add := func(root gopathwalk.Root, dir string) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
r.cacheStore(r.scanDirForPackage(root, dir))
|
||||
}
|
||||
|
||||
gopathwalk.WalkSkip(roots, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true})
|
||||
|
||||
// Everything we already had, and everything new, is now in the cache.
|
||||
for _, dir := range r.cacheKeys() {
|
||||
info, ok := r.cacheLoad(dir)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip this directory if we were not able to get the package information successfully.
|
||||
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// If we want package names, make sure the cache has them.
|
||||
if loadNames {
|
||||
var err error
|
||||
if info, err = r.cachePackageName(info); err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
res, err := r.canonicalize(info)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
result = append(result, res)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// canonicalize gets the result of canonicalizing the packages using the results
|
||||
// of initializing the resolver from 'go list -m'.
|
||||
func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
|
||||
// Packages in GOROOT are already canonical, regardless of the std/cmd modules.
|
||||
if info.rootType == gopathwalk.RootGOROOT {
|
||||
return &pkg{
|
||||
importPathShort: info.nonCanonicalImportPath,
|
||||
dir: info.dir,
|
||||
packageName: path.Base(info.nonCanonicalImportPath),
|
||||
relevance: 0,
|
||||
}, nil
|
||||
}
|
||||
|
||||
importPath := info.nonCanonicalImportPath
|
||||
relevance := 2
|
||||
// Check if the directory is underneath a module that's in scope.
|
||||
if mod := r.findModuleByDir(info.dir); mod != nil {
|
||||
relevance = 1
|
||||
// It is. If dir is the target of a replace directive,
|
||||
// our guessed import path is wrong. Use the real one.
|
||||
if mod.Dir == info.dir {
|
||||
importPath = mod.Path
|
||||
} else {
|
||||
dirInMod := info.dir[len(mod.Dir)+len("/"):]
|
||||
importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod))
|
||||
}
|
||||
} else if info.needsReplace {
|
||||
return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir)
|
||||
}
|
||||
|
||||
res := &pkg{
|
||||
importPathShort: importPath,
|
||||
dir: info.dir,
|
||||
packageName: info.packageName, // may not be populated if the caller didn't ask for it
|
||||
relevance: relevance,
|
||||
}
|
||||
// We may have discovered a package that has a different version
|
||||
// in scope already. Canonicalize to that one if possible.
|
||||
if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" {
|
||||
res.dir = canonicalDir
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
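// Editor's note (not part of the upstream file): the relevance values set in
// canonicalize form a rough priority scale for candidate packages:
//
//	0  packages under GOROOT (standard library)
//	1  packages inside a module that is already in scope
//	2  everything else, typically packages found in the module cache
//
// Smaller numbers mark packages that are presumably closer to the user's code.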
|
||||
func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if info, ok := r.cacheLoad(pkg.dir); ok {
|
||||
return r.cacheExports(ctx, r.env, info)
|
||||
}
|
||||
return loadExportsFromFiles(ctx, r.env, pkg.dir)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo {
|
||||
subdir := ""
|
||||
if dir != root.Path {
|
||||
subdir = dir[len(root.Path)+len("/"):]
|
||||
}
|
||||
importPath := filepath.ToSlash(subdir)
|
||||
if strings.HasPrefix(importPath, "vendor/") {
|
||||
// Only enter vendor directories if they're explicitly requested as a root.
|
||||
return directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
err: fmt.Errorf("unwanted vendor directory"),
|
||||
}
|
||||
}
|
||||
switch root.Type {
|
||||
case gopathwalk.RootCurrentModule:
|
||||
importPath = path.Join(r.Main.Path, filepath.ToSlash(subdir))
|
||||
case gopathwalk.RootModuleCache:
|
||||
matches := modCacheRegexp.FindStringSubmatch(subdir)
|
||||
if len(matches) == 0 {
|
||||
return directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
err: fmt.Errorf("invalid module cache path: %v", subdir),
|
||||
}
|
||||
}
|
||||
modPath, err := module.DecodePath(filepath.ToSlash(matches[1]))
|
||||
if err != nil {
|
||||
if r.env.Debug {
|
||||
r.env.Logf("decoding module cache path %q: %v", subdir, err)
|
||||
}
|
||||
return directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
err: fmt.Errorf("decoding module cache path %q: %v", subdir, err),
|
||||
}
|
||||
}
|
||||
importPath = path.Join(modPath, filepath.ToSlash(matches[3]))
|
||||
}
|
||||
|
||||
modDir, modName := r.modInfo(dir)
|
||||
result := directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
dir: dir,
|
||||
rootType: root.Type,
|
||||
nonCanonicalImportPath: importPath,
|
||||
needsReplace: false,
|
||||
moduleDir: modDir,
|
||||
moduleName: modName,
|
||||
}
|
||||
if root.Type == gopathwalk.RootGOROOT {
|
||||
// stdlib packages are always in scope, despite the confusing go.mod
|
||||
return result
|
||||
}
|
||||
// Check that this package is not obviously impossible to import.
|
||||
if !strings.HasPrefix(importPath, modName) {
|
||||
// The module's declared path does not match
|
||||
// its expected path. It probably needs a
|
||||
// replace directive we don't have.
|
||||
result.needsReplace = true
|
||||
}
|
||||
|
||||
return result
|
||||
}

// modCacheRegexp splits a path in a module cache into module, module version, and package.
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)

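// Worked example (editor's addition, not part of the upstream file): for a
// hypothetical module cache sub-path such as
//
//	github.com/foo/bar@v1.2.3/baz
//
// the submatches are "github.com/foo/bar" (module path), "v1.2.3" (version),
// and "/baz" (package directory within the module).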
var (
|
||||
slashSlash = []byte("//")
|
||||
moduleStr = []byte("module")
|
||||
)
|
||||
|
||||
// modulePath returns the module path from the gomod file text.
|
||||
// If it cannot find a module path, it returns an empty string.
|
||||
// It is tolerant of unrelated problems in the go.mod file.
|
||||
//
|
||||
// Copied from cmd/go/internal/modfile.
|
||||
func modulePath(mod []byte) string {
|
||||
for len(mod) > 0 {
|
||||
line := mod
|
||||
mod = nil
|
||||
if i := bytes.IndexByte(line, '\n'); i >= 0 {
|
||||
line, mod = line[:i], line[i+1:]
|
||||
}
|
||||
if i := bytes.Index(line, slashSlash); i >= 0 {
|
||||
line = line[:i]
|
||||
}
|
||||
line = bytes.TrimSpace(line)
|
||||
if !bytes.HasPrefix(line, moduleStr) {
|
||||
continue
|
||||
}
|
||||
line = line[len(moduleStr):]
|
||||
n := len(line)
|
||||
line = bytes.TrimSpace(line)
|
||||
if len(line) == n || len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if line[0] == '"' || line[0] == '`' {
|
||||
p, err := strconv.Unquote(string(line))
|
||||
if err != nil {
|
||||
return "" // malformed quoted string or multiline module path
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
return string(line)
|
||||
}
|
||||
return "" // missing module path
|
||||
}
|
||||
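// Worked example (editor's addition, not part of the upstream file): for
// go.mod contents such as
//
//	module example.com/hello
//
//	require rsc.io/quote v1.5.2
//
// modulePath returns "example.com/hello"; a quoted declaration like
// `module "example.com/hello"` yields the same, unquoted path. The module
// and requirement shown are hypothetical.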
|
||||
var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)
|
||||
|
||||
// vendorEnabled indicates if vendoring is enabled.
|
||||
// Inspired by setDefaultBuildMod in modload/init.go
|
||||
func vendorEnabled(env *ProcessEnv) (*ModuleJSON, bool, error) {
|
||||
mainMod, go114, err := getMainModuleAnd114(env)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
matches := modFlagRegexp.FindStringSubmatch(env.GOFLAGS)
|
||||
var modFlag string
|
||||
if len(matches) != 0 {
|
||||
modFlag = matches[1]
|
||||
}
|
||||
if modFlag != "" {
|
||||
// Don't override an explicit '-mod=' argument.
|
||||
return mainMod, modFlag == "vendor", nil
|
||||
}
|
||||
if mainMod == nil || !go114 {
|
||||
return mainMod, false, nil
|
||||
}
|
||||
// Check 1.14's automatic vendor mode.
|
||||
if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() {
|
||||
if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 {
|
||||
// The Go version is at least 1.14, and a vendor directory exists.
|
||||
// Set -mod=vendor by default.
|
||||
return mainMod, true, nil
|
||||
}
|
||||
}
|
||||
return mainMod, false, nil
|
||||
}
|
||||
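// Editor's summary (not part of the upstream file) of the decision above,
// which mirrors cmd/go's -mod=vendor defaulting:
//
//	GOFLAGS contains -mod=vendor                      -> vendoring on
//	GOFLAGS contains any other -mod= value            -> vendoring off
//	go command is 1.14+, go.mod declares go >= 1.14,
//	and the main module has a vendor directory        -> vendoring on
//	otherwise                                         -> vendoring off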
|
||||
// getMainModuleAnd114 gets the main module's information and whether the
|
||||
// go command in use is 1.14+. This is the information needed to figure out
|
||||
// if vendoring should be enabled.
|
||||
func getMainModuleAnd114(env *ProcessEnv) (*ModuleJSON, bool, error) {
|
||||
const format = `{{.Path}}
|
||||
{{.Dir}}
|
||||
{{.GoMod}}
|
||||
{{.GoVersion}}
|
||||
{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}}
|
||||
`
|
||||
stdout, err := env.invokeGo("list", "-m", "-f", format)
|
||||
if err != nil {
|
||||
return nil, false, nil
|
||||
}
|
||||
lines := strings.Split(stdout.String(), "\n")
|
||||
if len(lines) < 5 {
|
||||
return nil, false, fmt.Errorf("unexpected stdout: %q", stdout)
|
||||
}
|
||||
mod := &ModuleJSON{
|
||||
Path: lines[0],
|
||||
Dir: lines[1],
|
||||
GoMod: lines[2],
|
||||
GoVersion: lines[3],
|
||||
Main: true,
|
||||
}
|
||||
return mod, lines[4] == "go1.14", nil
|
||||
}
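// Editor's note (not part of the upstream file): with the format string above,
// a go 1.14+ toolchain run in a hypothetical module "example.com/m" prints
// five lines, e.g.
//
//	example.com/m
//	/home/u/m
//	/home/u/m/go.mod
//	1.14
//	go1.14
//
// The fifth line is empty on older toolchains, which is how the boolean
// result is derived.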
|
|
@ -0,0 +1,165 @@
|
|||
package imports
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
)
|
||||

// To find packages to import, the resolver needs to know about all of the
// packages that could be imported. This includes packages that are
// already in modules that are in (1) the current module, (2) replace targets,
// and (3) packages in the module cache. Packages in (1) and (2) may change over
// time, as the client may edit the current module and locally replaced modules.
// The module cache (which includes all of the packages in (3)) can only
// ever be added to.
//
// The resolver can thus save state about packages in the module cache
// and guarantee that this will not change over time. To obtain information
// about new modules added to the module cache, the module cache should be
// rescanned.
//
// It is OK to serve information about modules that have been deleted,
// as they do still exist.
// TODO(suzmue): can we share information with the caller about
// what module needs to be downloaded to import this package?

type directoryPackageStatus int
|
||||
|
||||
const (
|
||||
_ directoryPackageStatus = iota
|
||||
directoryScanned
|
||||
nameLoaded
|
||||
exportsLoaded
|
||||
)
|
||||
|
||||
type directoryPackageInfo struct {
|
||||
// status indicates the extent to which this struct has been filled in.
|
||||
status directoryPackageStatus
|
||||
// err is non-nil when there was an error trying to reach status.
|
||||
err error
|
||||
|
||||
// Set when status >= directoryScanned.
|
||||
|
||||
// dir is the absolute directory of this package.
|
||||
dir string
|
||||
rootType gopathwalk.RootType
|
||||
// nonCanonicalImportPath is the package's expected import path. It may
|
||||
// not actually be importable at that path.
|
||||
nonCanonicalImportPath string
|
||||
// needsReplace is true if the nonCanonicalImportPath does not match the
|
||||
// module's declared path, making it impossible to import without a
|
||||
// replace directive.
|
||||
needsReplace bool
|
||||
|
||||
// Module-related information.
|
||||
moduleDir string // The directory that is the module root of this dir.
|
||||
moduleName string // The module name that contains this dir.
|
||||
|
||||
// Set when status >= nameLoaded.
|
||||
|
||||
packageName string // the package name, as declared in the source.
|
||||
|
||||
// Set when status >= exportsLoaded.
|
||||
|
||||
exports []string
|
||||
}
|
||||
|
||||
// reachedStatus returns true when info has a status at least target and any error associated with
|
||||
// an attempt to reach target.
|
||||
func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (bool, error) {
|
||||
if info.err == nil {
|
||||
return info.status >= target, nil
|
||||
}
|
||||
if info.status == target {
|
||||
return true, info.err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||

// dirInfoCache is a concurrency safe map for storing information about
// directories that may contain packages.
//
// The information in this cache is built incrementally. Entries are initialized in scan.
// No new keys should be added in any other functions, as all directories containing
// packages are identified in scan.
//
// Other functions, including loadExports and findPackage, may update entries in this cache
// as they discover new things about the directory.
//
// The information in the cache is not expected to change for the cache's
// lifetime, so there is no protection against competing writes. Users should
// take care not to hold the cache across changes to the underlying files.
//
// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc)
type dirInfoCache struct {
	mu sync.Mutex
	// dirs stores information about packages in directories, keyed by absolute path.
	dirs map[string]*directoryPackageInfo
}
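// Usage sketch (editor's addition, not part of the upstream file); the
// directory is hypothetical:
//
//	cache := &dirInfoCache{dirs: map[string]*directoryPackageInfo{}}
//	cache.Store("/home/u/m/pkg", directoryPackageInfo{status: directoryScanned, dir: "/home/u/m/pkg"})
//	if info, ok := cache.Load("/home/u/m/pkg"); ok {
//		_ = info // Load returns a copy, so callers cannot mutate the cached entry.
//	}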
|
||||
// Store stores the package info for dir.
|
||||
func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
stored := info // defensive copy
|
||||
d.dirs[dir] = &stored
|
||||
}
|
||||
|
||||
// Load returns a copy of the directoryPackageInfo for absolute directory dir.
|
||||
func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
info, ok := d.dirs[dir]
|
||||
if !ok {
|
||||
return directoryPackageInfo{}, false
|
||||
}
|
||||
return *info, true
|
||||
}
|
||||
|
||||
// Keys returns the keys currently present in d.
|
||||
func (d *dirInfoCache) Keys() (keys []string) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
for key := range d.dirs {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) {
|
||||
if loaded, err := info.reachedStatus(nameLoaded); loaded {
|
||||
return info, err
|
||||
}
|
||||
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
|
||||
return info, fmt.Errorf("cannot read package name, scan error: %v", err)
|
||||
}
|
||||
info.packageName, info.err = packageDirToName(info.dir)
|
||||
info.status = nameLoaded
|
||||
d.Store(info.dir, info)
|
||||
return info, info.err
|
||||
}
|
||||
|
||||
func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
|
||||
if reached, _ := info.reachedStatus(exportsLoaded); reached {
|
||||
return info.packageName, info.exports, info.err
|
||||
}
|
||||
if reached, err := info.reachedStatus(nameLoaded); reached && err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir)
|
||||
if info.err == context.Canceled {
|
||||
return info.packageName, info.exports, info.err
|
||||
}
|
||||
// The cache structure wants things to proceed linearly. We can skip a
|
||||
// step here, but only if we succeed.
|
||||
if info.status == nameLoaded || info.err == nil {
|
||||
info.status = exportsLoaded
|
||||
} else {
|
||||
info.status = nameLoaded
|
||||
}
|
||||
d.Store(info.dir, info)
|
||||
return info.packageName, info.exports, info.err
|
||||
}
|
|
@ -0,0 +1,280 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Hacked up copy of go/ast/import.go
|
||||
|
||||
package imports
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// sortImports sorts runs of consecutive import lines in import blocks in f.
|
||||
// It also removes duplicate imports when it is possible to do so without data loss.
|
||||
func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) {
|
||||
for i, d := range f.Decls {
|
||||
d, ok := d.(*ast.GenDecl)
|
||||
if !ok || d.Tok != token.IMPORT {
|
||||
// Not an import declaration, so we're done.
|
||||
// Imports are always first.
|
||||
break
|
||||
}
|
||||
|
||||
if len(d.Specs) == 0 {
|
||||
// Empty import block, remove it.
|
||||
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
|
||||
}
|
||||
|
||||
if !d.Lparen.IsValid() {
|
||||
// Not a block: sorted by default.
|
||||
continue
|
||||
}
|
||||
|
||||
// Identify and sort runs of specs on successive lines.
|
||||
i := 0
|
||||
specs := d.Specs[:0]
|
||||
for j, s := range d.Specs {
|
||||
if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
|
||||
// j begins a new run. End this one.
|
||||
specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:j])...)
|
||||
i = j
|
||||
}
|
||||
}
|
||||
specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:])...)
|
||||
d.Specs = specs
|
||||
|
||||
// Deduping can leave a blank line before the rparen; clean that up.
|
||||
if len(d.Specs) > 0 {
|
||||
lastSpec := d.Specs[len(d.Specs)-1]
|
||||
lastLine := fset.Position(lastSpec.Pos()).Line
|
||||
if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
|
||||
fset.File(d.Rparen).MergeLine(rParenLine - 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
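// Illustration (editor's addition, not part of the upstream file): within one
// contiguous run of specs, imports are sorted by group, then path, then name,
// and exact duplicates without comments are collapsed, so
//
//	import (
//		"fmt"
//		"bytes"
//		"fmt"
//	)
//
// becomes
//
//	import (
//		"bytes"
//		"fmt"
//	)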
|
||||
// mergeImports merges all the import declarations into the first one.
|
||||
// Taken from golang.org/x/tools/ast/astutil.
|
||||
func mergeImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) {
|
||||
if len(f.Decls) <= 1 {
|
||||
return
|
||||
}
|
||||
|
||||
// Merge all the import declarations into the first one.
|
||||
var first *ast.GenDecl
|
||||
for i := 0; i < len(f.Decls); i++ {
|
||||
decl := f.Decls[i]
|
||||
gen, ok := decl.(*ast.GenDecl)
|
||||
if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
|
||||
continue
|
||||
}
|
||||
if first == nil {
|
||||
first = gen
|
||||
continue // Don't touch the first one.
|
||||
}
|
||||
// We now know there is more than one package in this import
|
||||
// declaration. Ensure that it ends up parenthesized.
|
||||
first.Lparen = first.Pos()
|
||||
// Move the imports of the other import declaration to the first one.
|
||||
for _, spec := range gen.Specs {
|
||||
spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
|
||||
first.Specs = append(first.Specs, spec)
|
||||
}
|
||||
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
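// Illustration (editor's addition, not part of the upstream file): two
// separate declarations such as
//
//	import "fmt"
//	import "os"
//
// are merged into a single parenthesized block,
//
//	import (
//		"fmt"
//		"os"
//	)
//
// while any declaration importing "C" is left untouched.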
|
||||
// declImports reports whether gen contains an import of path.
|
||||
// Taken from golang.org/x/tools/ast/astutil.
|
||||
func declImports(gen *ast.GenDecl, path string) bool {
|
||||
if gen.Tok != token.IMPORT {
|
||||
return false
|
||||
}
|
||||
for _, spec := range gen.Specs {
|
||||
impspec := spec.(*ast.ImportSpec)
|
||||
if importPath(impspec) == path {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func importPath(s ast.Spec) string {
|
||||
t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value)
|
||||
if err == nil {
|
||||
return t
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func importName(s ast.Spec) string {
|
||||
n := s.(*ast.ImportSpec).Name
|
||||
if n == nil {
|
||||
return ""
|
||||
}
|
||||
return n.Name
|
||||
}
|
||||
|
||||
func importComment(s ast.Spec) string {
|
||||
c := s.(*ast.ImportSpec).Comment
|
||||
if c == nil {
|
||||
return ""
|
||||
}
|
||||
return c.Text()
|
||||
}
|
||||
|
||||
// collapse indicates whether prev may be removed, leaving only next.
|
||||
func collapse(prev, next ast.Spec) bool {
|
||||
if importPath(next) != importPath(prev) || importName(next) != importName(prev) {
|
||||
return false
|
||||
}
|
||||
return prev.(*ast.ImportSpec).Comment == nil
|
||||
}
|
||||
|
||||
type posSpan struct {
|
||||
Start token.Pos
|
||||
End token.Pos
|
||||
}
|
||||
|
||||
func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
|
||||
// Can't short-circuit here even if specs are already sorted,
|
||||
// since they might yet need deduplication.
|
||||
// A lone import, however, may be safely ignored.
|
||||
if len(specs) <= 1 {
|
||||
return specs
|
||||
}
|
||||
|
||||
// Record positions for specs.
|
||||
pos := make([]posSpan, len(specs))
|
||||
for i, s := range specs {
|
||||
pos[i] = posSpan{s.Pos(), s.End()}
|
||||
}
|
||||
|
||||
// Identify comments in this range.
|
||||
// Any comment from pos[0].Start to the final line counts.
|
||||
lastLine := fset.Position(pos[len(pos)-1].End).Line
|
||||
cstart := len(f.Comments)
|
||||
cend := len(f.Comments)
|
||||
for i, g := range f.Comments {
|
||||
if g.Pos() < pos[0].Start {
|
||||
continue
|
||||
}
|
||||
if i < cstart {
|
||||
cstart = i
|
||||
}
|
||||
if fset.Position(g.End()).Line > lastLine {
|
||||
cend = i
|
||||
break
|
||||
}
|
||||
}
|
||||
comments := f.Comments[cstart:cend]
|
||||
|
||||
// Assign each comment to the import spec preceding it.
|
||||
importComment := map[*ast.ImportSpec][]*ast.CommentGroup{}
|
||||
specIndex := 0
|
||||
for _, g := range comments {
|
||||
for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
|
||||
specIndex++
|
||||
}
|
||||
s := specs[specIndex].(*ast.ImportSpec)
|
||||
importComment[s] = append(importComment[s], g)
|
||||
}
|
||||
|
||||
// Sort the import specs by import path.
|
||||
// Remove duplicates, when possible without data loss.
|
||||
// Reassign the import paths to have the same position sequence.
|
||||
// Reassign each comment to abut the end of its spec.
|
||||
// Sort the comments by new position.
|
||||
sort.Sort(byImportSpec{env, specs})
|
||||
|
||||
// Dedup. Thanks to our sorting, we can just consider
|
||||
// adjacent pairs of imports.
|
||||
deduped := specs[:0]
|
||||
for i, s := range specs {
|
||||
if i == len(specs)-1 || !collapse(s, specs[i+1]) {
|
||||
deduped = append(deduped, s)
|
||||
} else {
|
||||
p := s.Pos()
|
||||
fset.File(p).MergeLine(fset.Position(p).Line)
|
||||
}
|
||||
}
|
||||
specs = deduped
|
||||
|
||||
// Fix up comment positions
|
||||
for i, s := range specs {
|
||||
s := s.(*ast.ImportSpec)
|
||||
if s.Name != nil {
|
||||
s.Name.NamePos = pos[i].Start
|
||||
}
|
||||
s.Path.ValuePos = pos[i].Start
|
||||
s.EndPos = pos[i].End
|
||||
nextSpecPos := pos[i].End
|
||||
|
||||
for _, g := range importComment[s] {
|
||||
for _, c := range g.List {
|
||||
c.Slash = pos[i].End
|
||||
nextSpecPos = c.End()
|
||||
}
|
||||
}
|
||||
if i < len(specs)-1 {
|
||||
pos[i+1].Start = nextSpecPos
|
||||
pos[i+1].End = nextSpecPos
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(byCommentPos(comments))
|
||||
|
||||
// Fixing up comments can insert blank lines, because import specs end up on different lines.
// We remove those blank lines here by merging each import spec onto the first import spec's line.
firstSpecLine := fset.Position(specs[0].Pos()).Line
|
||||
for _, s := range specs[1:] {
|
||||
p := s.Pos()
|
||||
line := fset.File(p).Line(p)
|
||||
for previousLine := line - 1; previousLine >= firstSpecLine; {
|
||||
fset.File(p).MergeLine(previousLine)
|
||||
previousLine--
|
||||
}
|
||||
}
|
||||
return specs
|
||||
}
|
||||
|
||||
type byImportSpec struct {
|
||||
env *ProcessEnv
|
||||
specs []ast.Spec // slice of *ast.ImportSpec
|
||||
}
|
||||
|
||||
func (x byImportSpec) Len() int { return len(x.specs) }
|
||||
func (x byImportSpec) Swap(i, j int) { x.specs[i], x.specs[j] = x.specs[j], x.specs[i] }
|
||||
func (x byImportSpec) Less(i, j int) bool {
|
||||
ipath := importPath(x.specs[i])
|
||||
jpath := importPath(x.specs[j])
|
||||
|
||||
igroup := importGroup(x.env, ipath)
|
||||
jgroup := importGroup(x.env, jpath)
|
||||
if igroup != jgroup {
|
||||
return igroup < jgroup
|
||||
}
|
||||
|
||||
if ipath != jpath {
|
||||
return ipath < jpath
|
||||
}
|
||||
iname := importName(x.specs[i])
|
||||
jname := importName(x.specs[j])
|
||||
|
||||
if iname != jname {
|
||||
return iname < jname
|
||||
}
|
||||
return importComment(x.specs[i]) < importComment(x.specs[j])
|
||||
}
|
||||
|
||||
type byCommentPos []*ast.CommentGroup
|
||||
|
||||
func (x byCommentPos) Len() int { return len(x) }
|
||||
func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() }
|
File diff suppressed because it is too large
|
@ -0,0 +1,10 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["module.go"],
|
||||
importmap = "k8s.io/kops/vendor/golang.org/x/tools/internal/module",
|
||||
importpath = "golang.org/x/tools/internal/module",
|
||||
visibility = ["//vendor/golang.org/x/tools:__subpackages__"],
|
||||
deps = ["//vendor/golang.org/x/tools/internal/semver:go_default_library"],
|
||||
)
|
|
@ -0,0 +1,540 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package module defines the module.Version type
|
||||
// along with support code.
|
||||
package module
|
||||
|
||||
// IMPORTANT NOTE
|
||||
//
|
||||
// This file essentially defines the set of valid import paths for the go command.
|
||||
// There are many subtle considerations, including Unicode ambiguity,
|
||||
// security, network, and file system representations.
|
||||
//
|
||||
// This file also defines the set of valid module path and version combinations,
|
||||
// another topic with many subtle considerations.
|
||||
//
|
||||
// Changes to the semantics in this file require approval from rsc.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/tools/internal/semver"
|
||||
)
|
||||
|
||||
// A Version is defined by a module path and version pair.
|
||||
type Version struct {
|
||||
Path string
|
||||
|
||||
// Version is usually a semantic version in canonical form.
|
||||
// There are two exceptions to this general rule.
|
||||
// First, the top-level target of a build has no specific version
|
||||
// and uses Version = "".
|
||||
// Second, during MVS calculations the version "none" is used
|
||||
// to represent the decision to take no version of a given module.
|
||||
Version string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Check checks that a given module path, version pair is valid.
|
||||
// In addition to the path being a valid module path
|
||||
// and the version being a valid semantic version,
|
||||
// the two must correspond.
|
||||
// For example, the path "yaml/v2" only corresponds to
|
||||
// semantic versions beginning with "v2.".
|
||||
func Check(path, version string) error {
|
||||
if err := CheckPath(path); err != nil {
|
||||
return err
|
||||
}
|
||||
if !semver.IsValid(version) {
|
||||
return fmt.Errorf("malformed semantic version %v", version)
|
||||
}
|
||||
_, pathMajor, _ := SplitPathVersion(path)
|
||||
if !MatchPathMajor(version, pathMajor) {
|
||||
if pathMajor == "" {
|
||||
pathMajor = "v0 or v1"
|
||||
}
|
||||
if pathMajor[0] == '.' { // .v1
|
||||
pathMajor = pathMajor[1:]
|
||||
}
|
||||
return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
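// Worked examples (editor's addition, not part of the upstream file):
//
//	Check("gopkg.in/yaml.v2", "v2.2.1")       == nil
//	Check("github.com/foo/bar/v2", "v2.0.0")  == nil
//	Check("github.com/foo/bar", "v1.4.0")     == nil
//	Check("github.com/foo/bar", "v2.0.0")     != nil   // path needs a /v2 suffix
//
// The module paths and versions are hypothetical.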
|
||||
// firstPathOK reports whether r can appear in the first element of a module path.
|
||||
// The first element of the path must be an LDH domain name, at least for now.
|
||||
// To avoid case ambiguity, the domain name must be entirely lower case.
|
||||
func firstPathOK(r rune) bool {
|
||||
return r == '-' || r == '.' ||
|
||||
'0' <= r && r <= '9' ||
|
||||
'a' <= r && r <= 'z'
|
||||
}
|
||||
|
||||
// pathOK reports whether r can appear in an import path element.
|
||||
// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
|
||||
// This matches what "go get" has historically recognized in import paths.
|
||||
// TODO(rsc): We would like to allow Unicode letters, but that requires additional
|
||||
// care in the safe encoding (see note below).
|
||||
func pathOK(r rune) bool {
|
||||
if r < utf8.RuneSelf {
|
||||
return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' ||
|
||||
'0' <= r && r <= '9' ||
|
||||
'A' <= r && r <= 'Z' ||
|
||||
'a' <= r && r <= 'z'
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// fileNameOK reports whether r can appear in a file name.
|
||||
// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
|
||||
// If we expand the set of allowed characters here, we have to
|
||||
// work harder at detecting potential case-folding and normalization collisions.
|
||||
// See note about "safe encoding" below.
|
||||
func fileNameOK(r rune) bool {
|
||||
if r < utf8.RuneSelf {
|
||||
// Entire set of ASCII punctuation, from which we remove characters:
|
||||
// ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
|
||||
// We disallow some shell special characters: " ' * < > ? ` |
|
||||
// (Note that some of those are disallowed by the Windows file system as well.)
|
||||
// We also disallow path separators / : and \ (fileNameOK is only called on path element characters).
|
||||
// We allow spaces (U+0020) in file names.
|
||||
const allowed = "!#$%&()+,-.=@[]^_{}~ "
|
||||
if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
|
||||
return true
|
||||
}
|
||||
for i := 0; i < len(allowed); i++ {
|
||||
if rune(allowed[i]) == r {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
// It may be OK to add more ASCII punctuation here, but only carefully.
|
||||
// For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
|
||||
return unicode.IsLetter(r)
|
||||
}
|
||||
|
||||
// CheckPath checks that a module path is valid.
|
||||
func CheckPath(path string) error {
|
||||
if err := checkPath(path, false); err != nil {
|
||||
return fmt.Errorf("malformed module path %q: %v", path, err)
|
||||
}
|
||||
i := strings.Index(path, "/")
|
||||
if i < 0 {
|
||||
i = len(path)
|
||||
}
|
||||
if i == 0 {
|
||||
return fmt.Errorf("malformed module path %q: leading slash", path)
|
||||
}
|
||||
if !strings.Contains(path[:i], ".") {
|
||||
return fmt.Errorf("malformed module path %q: missing dot in first path element", path)
|
||||
}
|
||||
if path[0] == '-' {
|
||||
return fmt.Errorf("malformed module path %q: leading dash in first path element", path)
|
||||
}
|
||||
for _, r := range path[:i] {
|
||||
if !firstPathOK(r) {
|
||||
return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r)
|
||||
}
|
||||
}
|
||||
if _, _, ok := SplitPathVersion(path); !ok {
|
||||
return fmt.Errorf("malformed module path %q: invalid version", path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
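// Worked examples (editor's addition, not part of the upstream file):
//
//	CheckPath("github.com/user/repo")     == nil
//	CheckPath("localpkg")                 != nil   // missing dot in first path element
//	CheckPath("github.com//repo")         != nil   // double slash
//	CheckPath("github.com/user/repo/v1")  != nil   // /v1 is not a valid version suffix
//
// The paths are hypothetical.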
|
||||
// CheckImportPath checks that an import path is valid.
|
||||
func CheckImportPath(path string) error {
|
||||
if err := checkPath(path, false); err != nil {
|
||||
return fmt.Errorf("malformed import path %q: %v", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkPath checks that a general path is valid.
|
||||
// It returns an error describing why but not mentioning path.
|
||||
// Because these checks apply to both module paths and import paths,
|
||||
// the caller is expected to add the "malformed ___ path %q: " prefix.
|
||||
// fileName indicates whether the final element of the path is a file name
|
||||
// (as opposed to a directory name).
|
||||
func checkPath(path string, fileName bool) error {
|
||||
if !utf8.ValidString(path) {
|
||||
return fmt.Errorf("invalid UTF-8")
|
||||
}
|
||||
if path == "" {
|
||||
return fmt.Errorf("empty string")
|
||||
}
|
||||
if strings.Contains(path, "..") {
|
||||
return fmt.Errorf("double dot")
|
||||
}
|
||||
if strings.Contains(path, "//") {
|
||||
return fmt.Errorf("double slash")
|
||||
}
|
||||
if path[len(path)-1] == '/' {
|
||||
return fmt.Errorf("trailing slash")
|
||||
}
|
||||
elemStart := 0
|
||||
for i, r := range path {
|
||||
if r == '/' {
|
||||
if err := checkElem(path[elemStart:i], fileName); err != nil {
|
||||
return err
|
||||
}
|
||||
elemStart = i + 1
|
||||
}
|
||||
}
|
||||
if err := checkElem(path[elemStart:], fileName); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkElem checks whether an individual path element is valid.
|
||||
// fileName indicates whether the element is a file name (not a directory name).
|
||||
func checkElem(elem string, fileName bool) error {
|
||||
if elem == "" {
|
||||
return fmt.Errorf("empty path element")
|
||||
}
|
||||
if strings.Count(elem, ".") == len(elem) {
|
||||
return fmt.Errorf("invalid path element %q", elem)
|
||||
}
|
||||
if elem[0] == '.' && !fileName {
|
||||
return fmt.Errorf("leading dot in path element")
|
||||
}
|
||||
if elem[len(elem)-1] == '.' {
|
||||
return fmt.Errorf("trailing dot in path element")
|
||||
}
|
||||
charOK := pathOK
|
||||
if fileName {
|
||||
charOK = fileNameOK
|
||||
}
|
||||
for _, r := range elem {
|
||||
if !charOK(r) {
|
||||
return fmt.Errorf("invalid char %q", r)
|
||||
}
|
||||
}
|
||||
|
||||
// Windows disallows a bunch of path elements, sadly.
|
||||
// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
|
||||
short := elem
|
||||
if i := strings.Index(short, "."); i >= 0 {
|
||||
short = short[:i]
|
||||
}
|
||||
for _, bad := range badWindowsNames {
|
||||
if strings.EqualFold(bad, short) {
|
||||
return fmt.Errorf("disallowed path element %q", elem)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckFilePath checks whether a slash-separated file path is valid.
|
||||
func CheckFilePath(path string) error {
|
||||
if err := checkPath(path, true); err != nil {
|
||||
return fmt.Errorf("malformed file path %q: %v", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// badWindowsNames are the reserved file path elements on Windows.
|
||||
// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
|
||||
var badWindowsNames = []string{
|
||||
"CON",
|
||||
"PRN",
|
||||
"AUX",
|
||||
"NUL",
|
||||
"COM1",
|
||||
"COM2",
|
||||
"COM3",
|
||||
"COM4",
|
||||
"COM5",
|
||||
"COM6",
|
||||
"COM7",
|
||||
"COM8",
|
||||
"COM9",
|
||||
"LPT1",
|
||||
"LPT2",
|
||||
"LPT3",
|
||||
"LPT4",
|
||||
"LPT5",
|
||||
"LPT6",
|
||||
"LPT7",
|
||||
"LPT8",
|
||||
"LPT9",
|
||||
}
|
||||
|
||||
// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path
|
||||
// and version is either empty or "/vN" for N >= 2.
|
||||
// As a special case, gopkg.in paths are recognized directly;
|
||||
// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
|
||||
func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
|
||||
if strings.HasPrefix(path, "gopkg.in/") {
|
||||
return splitGopkgIn(path)
|
||||
}
|
||||
|
||||
i := len(path)
|
||||
dot := false
|
||||
for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') {
|
||||
if path[i-1] == '.' {
|
||||
dot = true
|
||||
}
|
||||
i--
|
||||
}
|
||||
if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' {
|
||||
return path, "", true
|
||||
}
|
||||
prefix, pathMajor = path[:i-2], path[i-2:]
|
||||
if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" {
|
||||
return path, "", false
|
||||
}
|
||||
return prefix, pathMajor, true
|
||||
}
|
||||
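// Worked examples (editor's addition, not part of the upstream file):
//
//	SplitPathVersion("github.com/foo/bar")     -> ("github.com/foo/bar", "", true)
//	SplitPathVersion("github.com/foo/bar/v2")  -> ("github.com/foo/bar", "/v2", true)
//	SplitPathVersion("github.com/foo/bar/v1")  -> ("github.com/foo/bar/v1", "", false)
//	SplitPathVersion("gopkg.in/yaml.v2")       -> ("gopkg.in/yaml", ".v2", true)
//
// The paths are hypothetical.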
|
||||
// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths.
|
||||
func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
|
||||
if !strings.HasPrefix(path, "gopkg.in/") {
|
||||
return path, "", false
|
||||
}
|
||||
i := len(path)
|
||||
if strings.HasSuffix(path, "-unstable") {
|
||||
i -= len("-unstable")
|
||||
}
|
||||
for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') {
|
||||
i--
|
||||
}
|
||||
if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' {
|
||||
// All gopkg.in paths must end in vN for some N.
|
||||
return path, "", false
|
||||
}
|
||||
prefix, pathMajor = path[:i-2], path[i-2:]
|
||||
if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" {
|
||||
return path, "", false
|
||||
}
|
||||
return prefix, pathMajor, true
|
||||
}
|
||||
|
||||
// MatchPathMajor reports whether the semantic version v
|
||||
// matches the path major version pathMajor.
|
||||
func MatchPathMajor(v, pathMajor string) bool {
|
||||
if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
|
||||
pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
|
||||
}
|
||||
if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" {
|
||||
// Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1.
|
||||
// For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
|
||||
return true
|
||||
}
|
||||
m := semver.Major(v)
|
||||
if pathMajor == "" {
|
||||
return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible"
|
||||
}
|
||||
return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:]
|
||||
}
|
||||
|
||||
// CanonicalVersion returns the canonical form of the version string v.
|
||||
// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible".
|
||||
func CanonicalVersion(v string) string {
|
||||
cv := semver.Canonical(v)
|
||||
if semver.Build(v) == "+incompatible" {
|
||||
cv += "+incompatible"
|
||||
}
|
||||
return cv
|
||||
}
|
||||
|
||||
// Sort sorts the list by Path, breaking ties by comparing Versions.
|
||||
func Sort(list []Version) {
|
||||
sort.Slice(list, func(i, j int) bool {
|
||||
mi := list[i]
|
||||
mj := list[j]
|
||||
if mi.Path != mj.Path {
|
||||
return mi.Path < mj.Path
|
||||
}
|
||||
// To help go.sum formatting, allow version/file.
|
||||
// Compare semver prefix by semver rules,
|
||||
// file by string order.
|
||||
vi := mi.Version
|
||||
vj := mj.Version
|
||||
var fi, fj string
|
||||
if k := strings.Index(vi, "/"); k >= 0 {
|
||||
vi, fi = vi[:k], vi[k:]
|
||||
}
|
||||
if k := strings.Index(vj, "/"); k >= 0 {
|
||||
vj, fj = vj[:k], vj[k:]
|
||||
}
|
||||
if vi != vj {
|
||||
return semver.Compare(vi, vj) < 0
|
||||
}
|
||||
return fi < fj
|
||||
})
|
||||
}
|
||||
|
||||
// Safe encodings
|
||||
//
|
||||
// Module paths appear as substrings of file system paths
|
||||
// (in the download cache) and of web server URLs in the proxy protocol.
|
||||
// In general we cannot rely on file systems to be case-sensitive,
|
||||
// nor can we rely on web servers, since they read from file systems.
|
||||
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
|
||||
// and rsc.io/quote separate. Windows and macOS don't.
|
||||
// Instead, we must never require two different casings of a file path.
|
||||
// Because we want the download cache to match the proxy protocol,
|
||||
// and because we want the proxy protocol to be possible to serve
|
||||
// from a tree of static files (which might be stored on a case-insensitive
|
||||
// file system), the proxy protocol must never require two different casings
|
||||
// of a URL path either.
|
||||
//
|
||||
// One possibility would be to make the safe encoding be the lowercase
|
||||
// hexadecimal encoding of the actual path bytes. This would avoid ever
|
||||
// needing different casings of a file path, but it would be fairly illegible
|
||||
// to most programmers when those paths appeared in the file system
|
||||
// (including in file paths in compiler errors and stack traces)
|
||||
// in web server logs, and so on. Instead, we want a safe encoding that
|
||||
// leaves most paths unaltered.
|
||||
//
|
||||
// The safe encoding is this:
|
||||
// replace every uppercase letter with an exclamation mark
|
||||
// followed by the letter's lowercase equivalent.
|
||||
//
|
||||
// For example,
|
||||
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
|
||||
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
|
||||
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
|
||||
//
|
||||
// Import paths that avoid upper-case letters are left unchanged.
|
||||
// Note that because import paths are ASCII-only and avoid various
|
||||
// problematic punctuation (like : < and >), the safe encoding is also ASCII-only
|
||||
// and avoids the same problematic punctuation.
|
||||
//
|
||||
// Import paths have never allowed exclamation marks, so there is no
|
||||
// need to define how to encode a literal !.
|
||||
//
|
||||
// Although paths are disallowed from using Unicode (see pathOK above),
|
||||
// the eventual plan is to allow Unicode letters as well, to assume that
|
||||
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
|
||||
// the !-for-uppercase convention. Note however that not all runes that
|
||||
// are different but case-fold equivalent are an upper/lower pair.
|
||||
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
|
||||
// are considered to case-fold to each other. When we do add Unicode
|
||||
// letters, we must not assume that upper/lower are the only case-equivalent pairs.
|
||||
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
|
||||
// Or perhaps it would encode as "!!k", or perhaps as "(212A)".
|
||||
//
|
||||
// Also, it would be nice to allow Unicode marks as well as letters,
|
||||
// but marks include combining marks, and then we must deal not
|
||||
// only with case folding but also normalization: both U+00E9 ('é')
|
||||
// and U+0065 U+0301 ('e' followed by combining acute accent)
|
||||
// look the same on the page and are treated by some file systems
|
||||
// as the same path. If we do allow Unicode marks in paths, there
|
||||
// must be some kind of normalization to allow only one canonical
|
||||
// encoding of any character used in an import path.
|
||||
|
||||
// EncodePath returns the safe encoding of the given module path.
|
||||
// It fails if the module path is invalid.
|
||||
func EncodePath(path string) (encoding string, err error) {
|
||||
if err := CheckPath(path); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return encodeString(path)
|
||||
}
|
||||
|
||||
// EncodeVersion returns the safe encoding of the given module version.
|
||||
// Versions are allowed to be in non-semver form but must be valid file names
|
||||
// and not contain exclamation marks.
|
||||
func EncodeVersion(v string) (encoding string, err error) {
|
||||
if err := checkElem(v, true); err != nil || strings.Contains(v, "!") {
|
||||
return "", fmt.Errorf("disallowed version string %q", v)
|
||||
}
|
||||
return encodeString(v)
|
||||
}
|
||||
|
||||
func encodeString(s string) (encoding string, err error) {
|
||||
haveUpper := false
|
||||
for _, r := range s {
|
||||
if r == '!' || r >= utf8.RuneSelf {
|
||||
// This should be disallowed by CheckPath, but diagnose anyway.
|
||||
// The correctness of the encoding loop below depends on it.
|
||||
return "", fmt.Errorf("internal error: inconsistency in EncodePath")
|
||||
}
|
||||
if 'A' <= r && r <= 'Z' {
|
||||
haveUpper = true
|
||||
}
|
||||
}
|
||||
|
||||
if !haveUpper {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
var buf []byte
|
||||
for _, r := range s {
|
||||
if 'A' <= r && r <= 'Z' {
|
||||
buf = append(buf, '!', byte(r+'a'-'A'))
|
||||
} else {
|
||||
buf = append(buf, byte(r))
|
||||
}
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// DecodePath returns the module path of the given safe encoding.
|
||||
// It fails if the encoding is invalid or encodes an invalid path.
|
||||
func DecodePath(encoding string) (path string, err error) {
|
||||
path, ok := decodeString(encoding)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("invalid module path encoding %q", encoding)
|
||||
}
|
||||
if err := CheckPath(path); err != nil {
|
||||
return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err)
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
// DecodeVersion returns the version string for the given safe encoding.
|
||||
// It fails if the encoding is invalid or encodes an invalid version.
|
||||
// Versions are allowed to be in non-semver form but must be valid file names
|
||||
// and not contain exclamation marks.
|
||||
func DecodeVersion(encoding string) (v string, err error) {
|
||||
v, ok := decodeString(encoding)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("invalid version encoding %q", encoding)
|
||||
}
|
||||
if err := checkElem(v, true); err != nil {
|
||||
return "", fmt.Errorf("disallowed version string %q", v)
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func decodeString(encoding string) (string, bool) {
|
||||
var buf []byte
|
||||
|
||||
bang := false
|
||||
for _, r := range encoding {
|
||||
if r >= utf8.RuneSelf {
|
||||
return "", false
|
||||
}
|
||||
if bang {
|
||||
bang = false
|
||||
if r < 'a' || 'z' < r {
|
||||
return "", false
|
||||
}
|
||||
buf = append(buf, byte(r+'A'-'a'))
|
||||
continue
|
||||
}
|
||||
if r == '!' {
|
||||
bang = true
|
||||
continue
|
||||
}
|
||||
if 'A' <= r && r <= 'Z' {
|
||||
return "", false
|
||||
}
|
||||
buf = append(buf, byte(r))
|
||||
}
|
||||
if bang {
|
||||
return "", false
|
||||
}
|
||||
return string(buf), true
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"parse.go",
|
||||
"span.go",
|
||||
"token.go",
|
||||
"token111.go",
|
||||
"token112.go",
|
||||
"uri.go",
|
||||
"utf16.go",
|
||||
],
|
||||
importmap = "k8s.io/kops/vendor/golang.org/x/tools/internal/span",
|
||||
importpath = "golang.org/x/tools/internal/span",
|
||||
visibility = ["//vendor/golang.org/x/tools:__subpackages__"],
|
||||
)
|
|
@ -0,0 +1,100 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package span

import (
	"strconv"
	"strings"
	"unicode/utf8"
)

// Parse returns the location represented by the input.
// All inputs are valid locations, as they can always be a pure filename.
// The returned span will be normalized, and thus if printed may produce a
// different string.
func Parse(input string) Span {
	// :0:0#0-0:0#0
	valid := input
	var hold, offset int
	hadCol := false
	suf := rstripSuffix(input)
	if suf.sep == "#" {
		offset = suf.num
		suf = rstripSuffix(suf.remains)
	}
	if suf.sep == ":" {
		valid = suf.remains
		hold = suf.num
		hadCol = true
		suf = rstripSuffix(suf.remains)
	}
	switch {
	case suf.sep == ":":
		return New(NewURI(suf.remains), NewPoint(suf.num, hold, offset), Point{})
	case suf.sep == "-":
		// we have a span, fall out of the case to continue
	default:
		// separator not valid, rewind to either the : or the start
		return New(NewURI(valid), NewPoint(hold, 0, offset), Point{})
	}
	// only the span form can get here
	// at this point we still don't know what the numbers we have mean
	// if we have not yet seen a : then we might have either a line or a column,
	// depending on whether start has a column or not
	// we build an end point and will fix it later if needed
	end := NewPoint(suf.num, hold, offset)
	hold, offset = 0, 0
	suf = rstripSuffix(suf.remains)
	if suf.sep == "#" {
		offset = suf.num
		suf = rstripSuffix(suf.remains)
	}
	if suf.sep != ":" {
		// turns out we don't have a span after all, rewind
		return New(NewURI(valid), end, Point{})
	}
	valid = suf.remains
	hold = suf.num
	suf = rstripSuffix(suf.remains)
	if suf.sep != ":" {
		// line#offset only
		return New(NewURI(valid), NewPoint(hold, 0, offset), end)
	}
	// we have a column, so if end only had one number, it is also the column
	if !hadCol {
		end = NewPoint(suf.num, end.v.Line, end.v.Offset)
	}
	return New(NewURI(suf.remains), NewPoint(suf.num, hold, offset), end)
}

type suffix struct {
	remains string
	sep     string
	num     int
}

func rstripSuffix(input string) suffix {
	if len(input) == 0 {
		return suffix{"", "", -1}
	}
	remains := input
	num := -1
	// first see if we have a number at the end
	last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' })
	if last >= 0 && last < len(remains)-1 {
		number, err := strconv.ParseInt(remains[last+1:], 10, 64)
		if err == nil {
			num = int(number)
			remains = remains[:last+1]
		}
	}
	// now see if we have a trailing separator
	r, w := utf8.DecodeLastRuneInString(remains)
	if r != ':' && r != '#' {
		return suffix{input, "", -1}
	}
	remains = remains[:len(remains)-w]
	return suffix{remains, string(r), num}
}
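A minimal usage sketch for Parse. It is not part of the vendored package; it assumes a _test.go file in package span with "testing" imported.

func TestParseSketch(t *testing.T) {
	// "file:line:column" is one of the forms Parse understands.
	s := Parse("main.go:10:4")
	if s.Start().Line() != 10 || s.Start().Column() != 4 {
		t.Fatalf("unexpected start point in %v", s)
	}
	// With no explicit end, the span collapses to a single point.
	if !s.IsPoint() {
		t.Fatalf("expected a point span, got %v", s)
	}
}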
@ -0,0 +1,285 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package span contains support for representing positions and ranges in
// text files.
package span

import (
	"encoding/json"
	"fmt"
	"path"
)

// Span represents a source code range in standardized form.
type Span struct {
	v span
}

// Point represents a single point within a file.
// In general this should only be used as part of a Span, as on its own it
// does not carry enough information.
type Point struct {
	v point
}

type span struct {
	URI   URI   `json:"uri"`
	Start point `json:"start"`
	End   point `json:"end"`
}

type point struct {
	Line   int `json:"line"`
	Column int `json:"column"`
	Offset int `json:"offset"`
}

// Invalid is a span that reports false from IsValid.
var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}}

var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}}

// Converter is the interface to an object that can convert between line:column
// and offset forms for a single file.
type Converter interface {
	// ToPosition converts from an offset to a line:column pair.
	ToPosition(offset int) (int, int, error)
	// ToOffset converts from a line:column pair to an offset.
	ToOffset(line, col int) (int, error)
}

func New(uri URI, start Point, end Point) Span {
	s := Span{v: span{URI: uri, Start: start.v, End: end.v}}
	s.v.clean()
	return s
}

func NewPoint(line, col, offset int) Point {
	p := Point{v: point{Line: line, Column: col, Offset: offset}}
	p.v.clean()
	return p
}

func Compare(a, b Span) int {
	if r := CompareURI(a.URI(), b.URI()); r != 0 {
		return r
	}
	if r := comparePoint(a.v.Start, b.v.Start); r != 0 {
		return r
	}
	return comparePoint(a.v.End, b.v.End)
}

func ComparePoint(a, b Point) int {
	return comparePoint(a.v, b.v)
}

func comparePoint(a, b point) int {
	if !a.hasPosition() {
		if a.Offset < b.Offset {
			return -1
		}
		if a.Offset > b.Offset {
			return 1
		}
		return 0
	}
	if a.Line < b.Line {
		return -1
	}
	if a.Line > b.Line {
		return 1
	}
	if a.Column < b.Column {
		return -1
	}
	if a.Column > b.Column {
		return 1
	}
	return 0
}

func (s Span) HasPosition() bool             { return s.v.Start.hasPosition() }
func (s Span) HasOffset() bool               { return s.v.Start.hasOffset() }
func (s Span) IsValid() bool                 { return s.v.Start.isValid() }
func (s Span) IsPoint() bool                 { return s.v.Start == s.v.End }
func (s Span) URI() URI                      { return s.v.URI }
func (s Span) Start() Point                  { return Point{s.v.Start} }
func (s Span) End() Point                    { return Point{s.v.End} }
func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) }
func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) }

func (p Point) HasPosition() bool             { return p.v.hasPosition() }
func (p Point) HasOffset() bool               { return p.v.hasOffset() }
func (p Point) IsValid() bool                 { return p.v.isValid() }
func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) }
func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) }
func (p Point) Line() int {
	if !p.v.hasPosition() {
		panic(fmt.Errorf("position not set in %v", p.v))
	}
	return p.v.Line
}
func (p Point) Column() int {
	if !p.v.hasPosition() {
		panic(fmt.Errorf("position not set in %v", p.v))
	}
	return p.v.Column
}
func (p Point) Offset() int {
	if !p.v.hasOffset() {
		panic(fmt.Errorf("offset not set in %v", p.v))
	}
	return p.v.Offset
}

func (p point) hasPosition() bool { return p.Line > 0 }
func (p point) hasOffset() bool   { return p.Offset >= 0 }
func (p point) isValid() bool     { return p.hasPosition() || p.hasOffset() }
func (p point) isZero() bool {
	return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0)
}

func (s *span) clean() {
	// this presumes the points are already clean
	if !s.End.isValid() || (s.End == point{}) {
		s.End = s.Start
	}
}

func (p *point) clean() {
	if p.Line < 0 {
		p.Line = 0
	}
	if p.Column <= 0 {
		if p.Line > 0 {
			p.Column = 1
		} else {
			p.Column = 0
		}
	}
	if p.Offset == 0 && (p.Line > 1 || p.Column > 1) {
		p.Offset = -1
	}
}

// Format implements fmt.Formatter to print the Location in a standard form.
// The format produced is one that can be read back in using Parse.
func (s Span) Format(f fmt.State, c rune) {
	fullForm := f.Flag('+')
	preferOffset := f.Flag('#')
	// we should always have a uri, simplify if it is file format
	// TODO: make sure the end of the uri is unambiguous
	uri := string(s.v.URI)
	if c == 'f' {
		uri = path.Base(uri)
	} else if !fullForm {
		uri = s.v.URI.Filename()
	}
	fmt.Fprint(f, uri)
	if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) {
		return
	}
	// see which bits of start to write
	printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition())
	printLine := s.HasPosition() && (fullForm || !printOffset)
	printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1))
	fmt.Fprint(f, ":")
	if printLine {
		fmt.Fprintf(f, "%d", s.v.Start.Line)
	}
	if printColumn {
		fmt.Fprintf(f, ":%d", s.v.Start.Column)
	}
	if printOffset {
		fmt.Fprintf(f, "#%d", s.v.Start.Offset)
	}
	// start is written, do we need end?
	if s.IsPoint() {
		return
	}
	// we don't print the line if it did not change
	printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line)
	fmt.Fprint(f, "-")
	if printLine {
		fmt.Fprintf(f, "%d", s.v.End.Line)
	}
	if printColumn {
		if printLine {
			fmt.Fprint(f, ":")
		}
		fmt.Fprintf(f, "%d", s.v.End.Column)
	}
	if printOffset {
		fmt.Fprintf(f, "#%d", s.v.End.Offset)
	}
}

func (s Span) WithPosition(c Converter) (Span, error) {
	if err := s.update(c, true, false); err != nil {
		return Span{}, err
	}
	return s, nil
}

func (s Span) WithOffset(c Converter) (Span, error) {
	if err := s.update(c, false, true); err != nil {
		return Span{}, err
	}
	return s, nil
}

func (s Span) WithAll(c Converter) (Span, error) {
	if err := s.update(c, true, true); err != nil {
		return Span{}, err
	}
	return s, nil
}

func (s *Span) update(c Converter, withPos, withOffset bool) error {
	if !s.IsValid() {
		return fmt.Errorf("cannot add information to an invalid span")
	}
	if withPos && !s.HasPosition() {
		if err := s.v.Start.updatePosition(c); err != nil {
			return err
		}
		if s.v.End.Offset == s.v.Start.Offset {
			s.v.End = s.v.Start
		} else if err := s.v.End.updatePosition(c); err != nil {
			return err
		}
	}
	if withOffset && (!s.HasOffset() || (s.v.End.hasPosition() && !s.v.End.hasOffset())) {
		if err := s.v.Start.updateOffset(c); err != nil {
			return err
		}
		if s.v.End.Line == s.v.Start.Line && s.v.End.Column == s.v.Start.Column {
			s.v.End.Offset = s.v.Start.Offset
		} else if err := s.v.End.updateOffset(c); err != nil {
			return err
		}
	}
	return nil
}

func (p *point) updatePosition(c Converter) error {
	line, col, err := c.ToPosition(p.Offset)
	if err != nil {
		return err
	}
	p.Line = line
	p.Column = col
	return nil
}

func (p *point) updateOffset(c Converter) error {
	offset, err := c.ToOffset(p.Line, p.Column)
	if err != nil {
		return err
	}
	p.Offset = offset
	return nil
}
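A minimal sketch of how a Span prints through Format. It is not part of the vendored package; it assumes a _test.go file in package span, the "fmt" and "testing" imports, and a Unix-style path.

func TestFormatSketch(t *testing.T) {
	s := New(FileURI("/tmp/a.go"), NewPoint(10, 4, -1), Point{})
	// Columns greater than 1 are printed; the offset is omitted because none was supplied.
	if got := fmt.Sprint(s); got != "/tmp/a.go:10:4" {
		t.Fatalf("formatted span = %q", got)
	}
}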
@ -0,0 +1,161 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package span

import (
	"fmt"
	"go/token"
)

// Range represents a source code range in token.Pos form.
// It also carries the FileSet that produced the positions, so that it is
// self contained.
type Range struct {
	FileSet *token.FileSet
	Start   token.Pos
	End     token.Pos
}

// TokenConverter is a Converter backed by a token file set and file.
// It uses the file set methods to work out the conversions, which
// makes it fast and does not require the file contents.
type TokenConverter struct {
	fset *token.FileSet
	file *token.File
}

// NewRange creates a new Range from a FileSet and two positions.
// To represent a point pass a 0 as the end pos.
func NewRange(fset *token.FileSet, start, end token.Pos) Range {
	return Range{
		FileSet: fset,
		Start:   start,
		End:     end,
	}
}

// NewTokenConverter returns an implementation of Converter backed by a
// token.File.
func NewTokenConverter(fset *token.FileSet, f *token.File) *TokenConverter {
	return &TokenConverter{fset: fset, file: f}
}

// NewContentConverter returns an implementation of Converter for the
// given file content.
func NewContentConverter(filename string, content []byte) *TokenConverter {
	fset := token.NewFileSet()
	f := fset.AddFile(filename, -1, len(content))
	f.SetLinesForContent(content)
	return &TokenConverter{fset: fset, file: f}
}

// IsPoint returns true if the range represents a single point.
func (r Range) IsPoint() bool {
	return r.Start == r.End
}

// Span converts a Range to a Span that represents the Range.
// It will fill in all the members of the Span, calculating the line and column
// information.
func (r Range) Span() (Span, error) {
	f := r.FileSet.File(r.Start)
	if f == nil {
		return Span{}, fmt.Errorf("file not found in FileSet")
	}
	s := Span{}
	var err error
	s.v.Start.Offset, err = offset(f, r.Start)
	if err != nil {
		return Span{}, err
	}
	if r.End.IsValid() {
		s.v.End.Offset, err = offset(f, r.End)
		if err != nil {
			return Span{}, err
		}
	}
	// In the presence of line directives, a single File can have sections from
	// multiple file names.
	filename := f.Position(r.Start).Filename
	if r.End.IsValid() {
		if endFilename := f.Position(r.End).Filename; filename != endFilename {
			return Span{}, fmt.Errorf("span begins in file %q but ends in %q", filename, endFilename)
		}
	}
	s.v.URI = FileURI(filename)

	s.v.Start.clean()
	s.v.End.clean()
	s.v.clean()
	converter := NewTokenConverter(r.FileSet, f)
	return s.WithPosition(converter)
}

// offset is a copy of the Offset function in go/token, but with the adjustment
// that it does not panic on invalid positions.
func offset(f *token.File, pos token.Pos) (int, error) {
	if int(pos) < f.Base() || int(pos) > f.Base()+f.Size() {
		return 0, fmt.Errorf("invalid pos")
	}
	return int(pos) - f.Base(), nil
}

// Range converts a Span to a Range that represents the Span for the supplied
// File.
func (s Span) Range(converter *TokenConverter) (Range, error) {
	s, err := s.WithOffset(converter)
	if err != nil {
		return Range{}, err
	}
	// go/token will panic if the offset is larger than the file's size,
	// so check here to avoid panicking.
	if s.Start().Offset() > converter.file.Size() {
		return Range{}, fmt.Errorf("start offset %v is past the end of the file %v", s.Start(), converter.file.Size())
	}
	if s.End().Offset() > converter.file.Size() {
		return Range{}, fmt.Errorf("end offset %v is past the end of the file %v", s.End(), converter.file.Size())
	}
	return Range{
		FileSet: converter.fset,
		Start:   converter.file.Pos(s.Start().Offset()),
		End:     converter.file.Pos(s.End().Offset()),
	}, nil
}

func (l *TokenConverter) ToPosition(offset int) (int, int, error) {
	if offset > l.file.Size() {
		return 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, l.file.Size())
	}
	pos := l.file.Pos(offset)
	p := l.fset.Position(pos)
	if offset == l.file.Size() {
		return p.Line + 1, 1, nil
	}
	return p.Line, p.Column, nil
}

func (l *TokenConverter) ToOffset(line, col int) (int, error) {
	if line < 0 {
		return -1, fmt.Errorf("line is not valid")
	}
	lineMax := l.file.LineCount() + 1
	if line > lineMax {
		return -1, fmt.Errorf("line is beyond end of file %v", lineMax)
	} else if line == lineMax {
		if col > 1 {
			return -1, fmt.Errorf("column is beyond end of file")
		}
		// at the end of the file, allowing for a trailing eol
		return l.file.Size(), nil
	}
	pos := lineStart(l.file, line)
	if !pos.IsValid() {
		return -1, fmt.Errorf("line is not in file")
	}
	// we assume that column is in bytes here, and that the first byte of a
	// line is at column 1
	pos += token.Pos(col - 1)
	return offset(l.file, pos)
}
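A minimal sketch of round-tripping line:column and offset through a TokenConverter. It is not part of the vendored package; it assumes a _test.go file in package span with "testing" imported.

func TestConverterSketch(t *testing.T) {
	content := []byte("package main\n\nfunc main() {}\n")
	converter := NewContentConverter("main.go", content)
	// Line 3, column 6 is the 'm' of "main" in "func main".
	offset, err := converter.ToOffset(3, 6)
	if err != nil {
		t.Fatal(err)
	}
	line, col, err := converter.ToPosition(offset)
	if err != nil || line != 3 || col != 6 {
		t.Fatalf("round trip gave %d:%d, %v", line, col, err)
	}
}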
@ -0,0 +1,39 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.12

package span

import (
	"go/token"
)

// lineStart is the pre-Go 1.12 version of (*token.File).LineStart. For Go
// versions <= 1.11, we borrow logic from the analysisutil package.
// TODO(rstambler): Delete this file when we no longer support Go 1.11.
func lineStart(f *token.File, line int) token.Pos {
	// Use binary search to find the start offset of this line.

	min := 0        // inclusive
	max := f.Size() // exclusive
	for {
		offset := (min + max) / 2
		pos := f.Pos(offset)
		posn := f.Position(pos)
		if posn.Line == line {
			return pos - (token.Pos(posn.Column) - 1)
		}

		if min+1 >= max {
			return token.NoPos
		}

		if posn.Line < line {
			min = offset
		} else {
			max = offset
		}
	}
}
@ -0,0 +1,16 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.12

package span

import (
	"go/token"
)

// TODO(rstambler): Delete this file when we no longer support Go 1.11.
func lineStart(f *token.File, line int) token.Pos {
	return f.LineStart(line)
}
@ -0,0 +1,152 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package span

import (
	"fmt"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"
	"unicode"
)

const fileScheme = "file"

// URI represents the full URI for a file.
type URI string

// Filename returns the file path for the given URI.
// It is an error to call this on a URI that is not a valid filename.
func (uri URI) Filename() string {
	filename, err := filename(uri)
	if err != nil {
		panic(err)
	}
	return filepath.FromSlash(filename)
}

func filename(uri URI) (string, error) {
	if uri == "" {
		return "", nil
	}
	u, err := url.ParseRequestURI(string(uri))
	if err != nil {
		return "", err
	}
	if u.Scheme != fileScheme {
		return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri)
	}
	if isWindowsDriveURI(u.Path) {
		u.Path = u.Path[1:]
	}
	return u.Path, nil
}

// NewURI returns a span URI for the string.
// It will attempt to detect if the string is a file path or uri.
func NewURI(s string) URI {
	if u, err := url.PathUnescape(s); err == nil {
		s = u
	}
	if strings.HasPrefix(s, fileScheme+"://") {
		return URI(s)
	}
	return FileURI(s)
}

func CompareURI(a, b URI) int {
	if equalURI(a, b) {
		return 0
	}
	if a < b {
		return -1
	}
	return 1
}

func equalURI(a, b URI) bool {
	if a == b {
		return true
	}
	// If we have the same URI basename, we may still have the same file URIs.
	if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) {
		return false
	}
	fa, err := filename(a)
	if err != nil {
		return false
	}
	fb, err := filename(b)
	if err != nil {
		return false
	}
	// Stat the files to check if they are equal.
	infoa, err := os.Stat(filepath.FromSlash(fa))
	if err != nil {
		return false
	}
	infob, err := os.Stat(filepath.FromSlash(fb))
	if err != nil {
		return false
	}
	return os.SameFile(infoa, infob)
}

// FileURI returns a span URI for the supplied file path.
// It will always have the file scheme.
func FileURI(path string) URI {
	if path == "" {
		return ""
	}
	// Handle standard library paths that contain the literal "$GOROOT".
	// TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT.
	const prefix = "$GOROOT"
	if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) {
		suffix := path[len(prefix):]
		path = runtime.GOROOT() + suffix
	}
	if !isWindowsDrivePath(path) {
		if abs, err := filepath.Abs(path); err == nil {
			path = abs
		}
	}
	// Check the file path again, in case it became absolute.
	if isWindowsDrivePath(path) {
		path = "/" + path
	}
	path = filepath.ToSlash(path)
	u := url.URL{
		Scheme: fileScheme,
		Path:   path,
	}
	uri := u.String()
	if unescaped, err := url.PathUnescape(uri); err == nil {
		uri = unescaped
	}
	return URI(uri)
}

// isWindowsDrivePath returns true if the file path is of the form used by
// Windows. We check if the path begins with a drive letter, followed by a ":".
func isWindowsDrivePath(path string) bool {
	if len(path) < 4 {
		return false
	}
	return unicode.IsLetter(rune(path[0])) && path[1] == ':'
}

// isWindowsDriveURI returns true if the file URI is of the format used by
// Windows URIs. The url.Parse package does not specially handle Windows paths
// (see https://golang.org/issue/6027). We check if the URI path has
// a drive prefix (e.g. "/C:"). If so, we trim the leading "/".
func isWindowsDriveURI(uri string) bool {
	if len(uri) < 4 {
		return false
	}
	return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':'
}
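A minimal sketch of FileURI and NewURI. It is not part of the vendored package; it assumes a _test.go file in package span with "testing" imported and a Unix-style path.

func TestURISketch(t *testing.T) {
	uri := FileURI("/tmp/a.go")
	if uri != "file:///tmp/a.go" {
		t.Fatalf("FileURI = %q", uri)
	}
	// NewURI passes existing file:// URIs through unchanged.
	if NewURI("file:///tmp/a.go") != uri {
		t.Fatal("NewURI did not preserve a file URI")
	}
	if got := uri.Filename(); got != "/tmp/a.go" {
		t.Fatalf("Filename = %q", got)
	}
}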
@ -0,0 +1,94 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package span

import (
	"fmt"
	"unicode/utf16"
	"unicode/utf8"
)

// ToUTF16Column calculates the utf16 column expressed by the point given the
// supplied file contents.
// This is used to convert from the native (always in bytes) column
// representation to the utf16 counts used by some editors.
func ToUTF16Column(p Point, content []byte) (int, error) {
	if content == nil {
		return -1, fmt.Errorf("ToUTF16Column: missing content")
	}
	if !p.HasPosition() {
		return -1, fmt.Errorf("ToUTF16Column: point is missing position")
	}
	if !p.HasOffset() {
		return -1, fmt.Errorf("ToUTF16Column: point is missing offset")
	}
	offset := p.Offset()      // 0-based
	colZero := p.Column() - 1 // 0-based
	if colZero == 0 {
		// 0-based column 0, so it must be chr 1
		return 1, nil
	} else if colZero < 0 {
		return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero)
	}
	// work out the offset at the start of the line using the column
	lineOffset := offset - colZero
	if lineOffset < 0 || offset > len(content) {
		return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content))
	}
	// Use the offset to pick out the line start.
	// This cannot panic: we have just checked that lineOffset >= 0 and
	// offset <= len(content).
	start := content[lineOffset:]

	// Now, truncate down to the supplied column.
	start = start[:colZero]

	// and count the number of utf16 characters
	// in theory we could do this by hand more efficiently...
	return len(utf16.Encode([]rune(string(start)))) + 1, nil
}

// FromUTF16Column advances the point by the utf16 character offset given the
// supplied line contents.
// This is used to convert from the utf16 counts used by some editors to the
// native (always in bytes) column representation.
func FromUTF16Column(p Point, chr int, content []byte) (Point, error) {
	if !p.HasOffset() {
		return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset")
	}
	// if chr is 1 then no adjustment needed
	if chr <= 1 {
		return p, nil
	}
	if p.Offset() >= len(content) {
		return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content))
	}
	remains := content[p.Offset():]
	// scan forward the specified number of characters
	for count := 1; count < chr; count++ {
		if len(remains) <= 0 {
			return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content")
		}
		r, w := utf8.DecodeRune(remains)
		if r == '\n' {
			// Per the LSP spec:
			//
			// > If the character value is greater than the line length it
			// > defaults back to the line length.
			break
		}
		remains = remains[w:]
		if r >= 0x10000 {
			// a two point rune
			count++
			// if we finished in a two point rune, do not advance past the first
			if count >= chr {
				break
			}
		}
		p.v.Column += w
		p.v.Offset += w
	}
	return p, nil
}
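A minimal sketch of the byte-column to UTF-16-column conversion above. It is not part of the vendored package; it assumes a _test.go file in package span with "testing" imported.

func TestUTF16Sketch(t *testing.T) {
	// "𐐀" (U+10400) is one rune, four UTF-8 bytes, and two UTF-16 code units.
	content := []byte("a𐐀b\n")
	// The 'b' sits at byte offset 5, i.e. byte column 6 on line 1.
	got, err := ToUTF16Column(NewPoint(1, 6, 5), content)
	if err != nil || got != 4 {
		t.Fatalf("ToUTF16Column = %d, %v; want 4", got, err)
	}
	// And back again: UTF-16 column 4 lands on the same byte column and offset.
	p, err := FromUTF16Column(NewPoint(1, 1, 0), 4, content)
	if err != nil || p.Column() != 6 || p.Offset() != 5 {
		t.Fatalf("FromUTF16Column = %v, %v", p, err)
	}
}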
@ -510,7 +510,8 @@ golang.org/x/text/unicode/norm
golang.org/x/text/width
# golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
golang.org/x/time/rate
# golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac
# golang.org/x/tools v0.0.0-20191124021906-f5828fc9a103
golang.org/x/tools/cmd/goimports
golang.org/x/tools/go/analysis
golang.org/x/tools/go/analysis/passes/inspect
golang.org/x/tools/go/ast/astutil
@ -525,7 +526,10 @@ golang.org/x/tools/go/types/typeutil
golang.org/x/tools/go/vcs
golang.org/x/tools/internal/fastwalk
golang.org/x/tools/internal/gopathwalk
golang.org/x/tools/internal/imports
golang.org/x/tools/internal/module
golang.org/x/tools/internal/semver
golang.org/x/tools/internal/span
# gomodules.xyz/jsonpatch/v2 v2.0.1
gomodules.xyz/jsonpatch/v2
# google.golang.org/api v0.0.0-20181220000619-583d854617af