lint: fix some unused parameter issues (#4956)

Ryan Leung 2021-11-10 03:18:21 +08:00 committed by GitHub
parent 59e024e4c7
commit 14ebd917f2
21 changed files with 62 additions and 85 deletions
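Most of the edits below follow one pattern: a parameter, result, or conversion that no code path actually uses — the kind of finding a linter such as unparam or staticcheck reports. A minimal, self-contained sketch of the core fix (hypothetical names, not from this diff):

package main

// sum is the "before" shape: "unused" is accepted but never read.
func sum(xs []int, unused int) int {
	t := 0
	for _, x := range xs {
		t += x
	}
	return t
}

// The fix drops the parameter and updates every call site.
func sumFixed(xs []int) int {
	t := 0
	for _, x := range xs {
		t += x
	}
	return t
}

func main() { _ = sumFixed([]int{1, 2, 3}) }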

View File

@@ -154,21 +154,21 @@ func parsePrincipalNames(principalNames []string) []*v3rbacpb.Principal {
 	return ps
 }
 
-func parsePeer(source peer) (*v3rbacpb.Principal, error) {
+func parsePeer(source peer) *v3rbacpb.Principal {
 	if source.Principals == nil {
 		return &v3rbacpb.Principal{
 			Identifier: &v3rbacpb.Principal_Any{
 				Any: true,
 			},
-		}, nil
+		}
 	}
 	if len(source.Principals) == 0 {
 		return &v3rbacpb.Principal{
 			Identifier: &v3rbacpb.Principal_Authenticated_{
 				Authenticated: &v3rbacpb.Principal_Authenticated{},
-			}}, nil
+			}}
 	}
-	return principalOr(parsePrincipalNames(source.Principals)), nil
+	return principalOr(parsePrincipalNames(source.Principals))
 }
 
 func parsePaths(paths []string) []*v3rbacpb.Permission {
@@ -257,17 +257,13 @@ func parseRules(rules []rule, prefixName string) (map[string]*v3rbacpb.Policy, e
 		if rule.Name == "" {
 			return policies, fmt.Errorf(`%d: "name" is not present`, i)
 		}
-		principal, err := parsePeer(rule.Source)
-		if err != nil {
-			return nil, fmt.Errorf("%d: %v", i, err)
-		}
 		permission, err := parseRequest(rule.Request)
 		if err != nil {
 			return nil, fmt.Errorf("%d: %v", i, err)
 		}
 		policyName := prefixName + "_" + rule.Name
 		policies[policyName] = &v3rbacpb.Policy{
-			Principals:  []*v3rbacpb.Principal{principal},
+			Principals:  []*v3rbacpb.Principal{parsePeer(rule.Source)},
 			Permissions: []*v3rbacpb.Permission{permission},
 		}
 	}
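The hunk above shows the related always-nil pattern: parsePeer returned a non-nil error on no path, so the error result carried no information, and dropping it also deletes the unreachable `if err != nil` block in parseRules. A hedged sketch of the same transformation, with hypothetical names:

package main

// Before: the error result is nil on every return path.
func size(s string) (int, error) { return len(s), nil }

// After: callers lose their dead error-handling branches.
func sizeFixed(s string) int { return len(s) }

func main() { _ = sizeFixed("abc") }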

View File

@@ -156,7 +156,7 @@ func TestShouldThrottleOptions(t *testing.T) {
 	for _, test := range testcases {
 		t.Run(test.desc, func(t *testing.T) {
 			m.SetNanos(0)
-			th := newWithArgs(time.Duration(time.Nanosecond), 1, test.ratioForAccepts, test.requestsPadding)
+			th := newWithArgs(time.Nanosecond, 1, test.ratioForAccepts, test.requestsPadding)
 			for i, response := range responses {
 				if response != E {
 					th.RegisterBackendResponse(response == T)
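time.Nanosecond is already a typed constant of type time.Duration, so time.Duration(time.Nanosecond) converted a value to its own type; the byteLatency change later in this commit is the same fix. A small self-contained check:

package main

import "time"

func main() {
	var d time.Duration = time.Nanosecond // already a Duration; no conversion needed
	byteLatency := time.Second / 128      // Duration divided by an untyped constant is still a Duration
	_, _ = d, byteLatency
}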

View File

@@ -64,7 +64,7 @@ type lookupCallback func(targets []string, headerData string, err error)
 
 // lookup starts a RouteLookup RPC in a separate goroutine and returns the
 // results (and error, if any) in the provided callback.
-func (c *rlsClient) lookup(path string, keyMap map[string]string, cb lookupCallback) {
+func (c *rlsClient) lookup(keyMap map[string]string, cb lookupCallback) {
 	go func() {
 		ctx, cancel := context.WithTimeout(context.Background(), c.rpcTimeout)
 		resp, err := c.stub.RouteLookup(ctx, &rlspb.RouteLookupRequest{

View File

@@ -71,7 +71,7 @@ func (s) TestLookupFailure(t *testing.T) {
 	rlsClient := newRLSClient(cc, defaultDialTarget, defaultRPCTimeout)
 
 	errCh := testutils.NewChannel()
-	rlsClient.lookup("", nil, func(targets []string, headerData string, err error) {
+	rlsClient.lookup(nil, func(targets []string, headerData string, err error) {
 		if err == nil {
 			errCh.Send(errors.New("rlsClient.lookup() succeeded, should have failed"))
 			return
@@ -101,7 +101,7 @@ func (s) TestLookupDeadlineExceeded(t *testing.T) {
 	rlsClient := newRLSClient(cc, defaultDialTarget, 100*time.Millisecond)
 
 	errCh := testutils.NewChannel()
-	rlsClient.lookup("", nil, func(_ []string, _ string, err error) {
+	rlsClient.lookup(nil, func(_ []string, _ string, err error) {
 		if st, ok := status.FromError(err); !ok || st.Code() != codes.DeadlineExceeded {
 			errCh.Send(fmt.Errorf("rlsClient.lookup() returned error: %v, want %v", err, codes.DeadlineExceeded))
 			return
@@ -121,10 +121,7 @@ func (s) TestLookupSuccess(t *testing.T) {
 	server, cc, cleanup := setup(t)
 	defer cleanup()
 
-	const (
-		rlsReqPath     = "/service/method"
-		wantHeaderData = "headerData"
-	)
+	const wantHeaderData = "headerData"
 
 	rlsReqKeyMap := map[string]string{
 		"k1": "v1",
@@ -141,7 +138,7 @@ func (s) TestLookupSuccess(t *testing.T) {
 	rlsClient := newRLSClient(cc, defaultDialTarget, defaultRPCTimeout)
 
 	errCh := testutils.NewChannel()
-	rlsClient.lookup(rlsReqPath, rlsReqKeyMap, func(targets []string, hd string, err error) {
+	rlsClient.lookup(rlsReqKeyMap, func(targets []string, hd string, err error) {
 		if err != nil {
 			errCh.Send(fmt.Errorf("rlsClient.Lookup() failed: %v", err))
 			return

View File

@@ -206,7 +206,7 @@ func streamBenchmark(start startFunc, stop stopFunc, bf stats.Features, s *stats
 	runBenchmark(caller, start, stop, bf, s, workloadsStreaming)
 }
 
-func unconstrainedStreamBenchmark(start startFunc, stop ucStopFunc, bf stats.Features, s *stats.Stats) {
+func unconstrainedStreamBenchmark(start startFunc, stop ucStopFunc, bf stats.Features) {
 	var sender rpcSendFunc
 	var recver rpcRecvFunc
 	var cleanup rpcCleanupFunc
@@ -771,7 +771,7 @@ func main() {
 			streamBenchmark(start, stop, bf, s)
 		}
 		if opts.rModes.unconstrained {
-			unconstrainedStreamBenchmark(start, ucStop, bf, s)
+			unconstrainedStreamBenchmark(start, ucStop, bf)
 		}
 	}
 	after(opts, s.GetResults())

View File

@@ -86,7 +86,7 @@ func (s) TestConn(t *testing.T) {
 	wantSleeps(latency) // Connection creation delay.
 
 	// 1 kbps = 128 Bps. Divides evenly by 1 second using nanos.
-	byteLatency := time.Duration(time.Second / 128)
+	byteLatency := time.Second / 128
 
 	write := func(b []byte) {
 		n, err := c.Write(b)

View File

@@ -252,13 +252,10 @@ func (te *test) tearDown() {
 	te.srv.Stop()
 }
 
-type testConfig struct {
-}
-
 // newTest returns a new test using the provided testing.T and
 // environment. It is returned with default values. Tests should
 // modify it before calling its startServer and clientConn methods.
-func newTest(t *testing.T, tc *testConfig) *test {
+func newTest(t *testing.T) *test {
 	te := &test{
 		t: t,
 	}
@@ -794,8 +791,8 @@ func (ed *expectedData) toServerLogEntries() []*pb.GrpcLogEntry {
 	return ret
 }
 
-func runRPCs(t *testing.T, tc *testConfig, cc *rpcConfig) *expectedData {
-	te := newTest(t, tc)
+func runRPCs(t *testing.T, cc *rpcConfig) *expectedData {
+	te := newTest(t)
 	te.startServer(&testServer{te: te})
 	defer te.tearDown()
 
@@ -869,7 +866,7 @@ func equalLogEntry(entries ...*pb.GrpcLogEntry) (equal bool) {
 func testClientBinaryLog(t *testing.T, c *rpcConfig) error {
 	defer testSink.clear()
 
-	expect := runRPCs(t, &testConfig{}, c)
+	expect := runRPCs(t, c)
 	want := expect.toClientLogEntries()
 	var got []*pb.GrpcLogEntry
 	// In racy cases, some entries are not logged when the RPC is finished (e.g.
@@ -969,7 +966,7 @@ func (s) TestClientBinaryLogCancel(t *testing.T) {
 func testServerBinaryLog(t *testing.T, c *rpcConfig) error {
 	defer testSink.clear()
 
-	expect := runRPCs(t, &testConfig{}, c)
+	expect := runRPCs(t, c)
 	want := expect.toServerLogEntries()
 	var got []*pb.GrpcLogEntry
 	// In racy cases, some entries are not logged when the RPC is finished (e.g.
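testConfig had no fields, so every *testConfig threaded through newTest and runRPCs could only ever be the zero value; deleting the type removes pure plumbing. A sketch of the smell, with hypothetical names:

package main

import "testing"

type config struct{} // field-less: carries no information

func run(t *testing.T, c *config) {} // before: c is dead weight
func runFixed(t *testing.T)       {} // after

func main() {}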

View File

@@ -204,9 +204,9 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 {
 		trace:       &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
 	}
 	if pid == 0 {
-		db.get().addChannel(id, cn, true, pid, ref)
+		db.get().addChannel(id, cn, true, pid)
 	} else {
-		db.get().addChannel(id, cn, false, pid, ref)
+		db.get().addChannel(id, cn, false, pid)
 	}
 	return id
 }
@@ -228,7 +228,7 @@ func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
 		pid:        pid,
 		trace:      &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
 	}
-	db.get().addSubChannel(id, sc, pid, ref)
+	db.get().addSubChannel(id, sc, pid)
 	return id
 }
 
@@ -258,7 +258,7 @@ func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
 	}
 	id := idGen.genID()
 	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
-	db.get().addListenSocket(id, ls, pid, ref)
+	db.get().addListenSocket(id, ls, pid)
 	return id
 }
 
@@ -273,11 +273,11 @@ func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
 	}
 	id := idGen.genID()
 	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
-	db.get().addNormalSocket(id, ns, pid, ref)
+	db.get().addNormalSocket(id, ns, pid)
 	return id
 }
 
-// RemoveEntry removes an entry with unique channelz trakcing id to be id from
+// RemoveEntry removes an entry with unique channelz tracking id to be id from
 // channelz database.
 func RemoveEntry(id int64) {
 	db.get().removeEntry(id)
@@ -333,7 +333,7 @@ func (c *channelMap) addServer(id int64, s *server) {
 	c.mu.Unlock()
 }
 
-func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
+func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) {
 	c.mu.Lock()
 	cn.cm = c
 	cn.trace.cm = c
@@ -346,7 +346,7 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in
 	c.mu.Unlock()
 }
 
-func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
+func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) {
 	c.mu.Lock()
 	sc.cm = c
 	sc.trace.cm = c
@@ -355,7 +355,7 @@ func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref stri
 	c.mu.Unlock()
 }
 
-func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
+func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) {
 	c.mu.Lock()
 	ls.cm = c
 	c.listenSockets[id] = ls
@@ -363,7 +363,7 @@ func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref
 	c.mu.Unlock()
 }
 
-func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
+func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) {
 	c.mu.Lock()
 	ns.cm = c
 	c.normalSockets[id] = ns

View File

@@ -244,7 +244,7 @@ func (cb *CircularBuffer) Drain() []interface{} {
 	}
 
 	var wg sync.WaitGroup
-	wg.Add(int(len(qs)))
+	wg.Add(len(qs))
 	for i := 0; i < len(qs); i++ {
 		go func(qi int) {
 			qs[qi].drainWait()
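The built-in len always returns an int, and sync.WaitGroup.Add takes an int, so int(len(qs)) was an identity conversion. For instance:

package main

import "sync"

func main() {
	qs := make([]chan int, 4)
	var wg sync.WaitGroup
	wg.Add(len(qs)) // len(qs) is already an int; int(len(qs)) added nothing
	for range qs {
		wg.Done()
	}
	wg.Wait()
}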

View File

@@ -148,7 +148,7 @@ func TestBalancerConfigMarshalJSON(t *testing.T) {
 				Name:   testBalancerBuilderName,
 				Config: testBalancerConfig,
 			},
-			wantJSON: fmt.Sprintf(`[{"test-bb": {"check":true}}]`),
+			wantJSON: `[{"test-bb": {"check":true}}]`,
 		},
 		{
 			name: "OK config is nil",
@@ -156,7 +156,7 @@ func TestBalancerConfigMarshalJSON(t *testing.T) {
 				Name:   testBalancerBuilderNotParserName,
 				Config: nil, // nil should be marshalled to an empty config "{}".
 			},
-			wantJSON: fmt.Sprintf(`[{"test-bb-not-parser": {}}]`),
+			wantJSON: `[{"test-bb-not-parser": {}}]`,
 		},
 	}
 	for _, tt := range tests {
@@ -172,7 +172,7 @@ func TestBalancerConfigMarshalJSON(t *testing.T) {
 
 			var bc BalancerConfig
 			if err := bc.UnmarshalJSON(b); err != nil {
-				t.Errorf("failed to mnmarshal: %v", err)
+				t.Errorf("failed to unmarshal: %v", err)
 			}
 			if !cmp.Equal(bc, tt.bc) {
 				t.Errorf("diff: %v", cmp.Diff(bc, tt.bc))

View File

@@ -136,12 +136,10 @@ type inFlow struct {
 
 // newLimit updates the inflow window to a new value n.
 // It assumes that n is always greater than the old limit.
-func (f *inFlow) newLimit(n uint32) uint32 {
+func (f *inFlow) newLimit(n uint32) {
 	f.mu.Lock()
-	d := n - f.limit
 	f.limit = n
 	f.mu.Unlock()
-	return d
 }
 
 func (f *inFlow) maybeAdjust(n uint32) uint32 {
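newLimit computed and returned the change in the window size, but no caller read it, so both the subtraction and the uint32 result are dead. A sketch of the same cleanup on a hypothetical type (the real inFlow also holds a mutex, as the diff shows):

package main

type window struct{ limit uint32 }

// Before: returns a delta that no caller uses.
func (w *window) newLimit(n uint32) uint32 {
	d := n - w.limit
	w.limit = n
	return d
}

// After: side effect only.
func (w *window) newLimitFixed(n uint32) { w.limit = n }

func main() { (&window{}).newLimitFixed(64) }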

View File

@@ -1557,7 +1557,7 @@ func minTime(a, b time.Duration) time.Duration {
 	return b
 }
 
-// keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
+// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
 func (t *http2Client) keepalive() {
 	p := &ping{data: [8]byte{}}
 	// True iff a ping has been sent, and no data has been received since then.

View File

@@ -37,7 +37,7 @@ var (
 	httpProxyFromEnvironment = http.ProxyFromEnvironment
 )
 
-func mapAddress(ctx context.Context, address string) (*url.URL, error) {
+func mapAddress(address string) (*url.URL, error) {
 	req := &http.Request{
 		URL: &url.URL{
 			Scheme: "https",
@@ -114,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri
 // connection.
 func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) {
 	newAddr := addr
-	proxyURL, err := mapAddress(ctx, addr)
+	proxyURL, err := mapAddress(addr)
 	if err != nil {
 		return nil, err
 	}

View File

@@ -211,11 +211,8 @@ func (s) TestMapAddressEnv(t *testing.T) {
 	}
 	defer overwrite(hpfe)()
 
-	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
-	defer cancel()
-
 	// envTestAddr should be handled by ProxyFromEnvironment.
-	got, err := mapAddress(ctx, envTestAddr)
+	got, err := mapAddress(envTestAddr)
 	if err != nil {
 		t.Error(err)
 	}

View File

@@ -194,12 +194,12 @@ func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) {
 	}
 }
 
-func (h *testStreamHandler) handleStreamEncodingRequiredStatus(t *testing.T, s *Stream) {
+func (h *testStreamHandler) handleStreamEncodingRequiredStatus(s *Stream) {
 	// raw newline is not accepted by http2 framer so it must be encoded.
 	h.t.WriteStatus(s, encodingTestStatus)
 }
 
-func (h *testStreamHandler) handleStreamInvalidHeaderField(t *testing.T, s *Stream) {
+func (h *testStreamHandler) handleStreamInvalidHeaderField(s *Stream) {
 	headerFields := []hpack.HeaderField{}
 	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: expectedInvalidHeaderField})
 	h.t.controlBuf.put(&headerFrame{
@@ -356,13 +356,13 @@ func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hT
 		})
 	case encodingRequiredStatus:
 		go transport.HandleStreams(func(s *Stream) {
-			go h.handleStreamEncodingRequiredStatus(t, s)
+			go h.handleStreamEncodingRequiredStatus(s)
 		}, func(ctx context.Context, method string) context.Context {
 			return ctx
 		})
 	case invalidHeaderField:
 		go transport.HandleStreams(func(s *Stream) {
-			go h.handleStreamInvalidHeaderField(t, s)
+			go h.handleStreamInvalidHeaderField(s)
 		}, func(ctx context.Context, method string) context.Context {
 			return ctx
 		})

View File

@@ -753,7 +753,7 @@ func checkEnd(t *testing.T, d *gotData, e *expectedData) {
 	}
 }
 
-func checkConnBegin(t *testing.T, d *gotData, e *expectedData) {
+func checkConnBegin(t *testing.T, d *gotData) {
 	var (
 		ok bool
 		st *stats.ConnBegin
@@ -767,7 +767,7 @@ func checkConnBegin(t *testing.T, d *gotData, e *expectedData) {
 	st.IsClient() // TODO remove this.
 }
 
-func checkConnEnd(t *testing.T, d *gotData, e *expectedData) {
+func checkConnEnd(t *testing.T, d *gotData) {
 	var (
 		ok bool
 		st *stats.ConnEnd
@@ -815,9 +815,9 @@ func checkConnStats(t *testing.T, got []*gotData) {
 		t.Fatalf("got %v stats, want even positive number", len(got))
 	}
 	// The first conn stats must be a ConnBegin.
-	checkConnBegin(t, got[0], nil)
+	checkConnBegin(t, got[0])
 	// The last conn stats must be a ConnEnd.
-	checkConnEnd(t, got[len(got)-1], nil)
+	checkConnEnd(t, got[len(got)-1])
 }
 
 func checkServerStats(t *testing.T, got []*gotData, expect *expectedData, checkFuncs []func(t *testing.T, d *gotData, e *expectedData)) {

View File

@@ -299,12 +299,10 @@ func init() {
 }
 
 func (s) TestDoneLoads(t *testing.T) {
-	for _, e := range listTestEnv() {
-		testDoneLoads(t, e)
-	}
+	testDoneLoads(t)
 }
 
-func testDoneLoads(t *testing.T, e env) {
+func testDoneLoads(t *testing.T) {
 	b := &testBalancer{}
 	balancer.Register(b)

View File

@@ -36,7 +36,7 @@ const (
 
 // setupTests creates a clusterHandler with a fake xds client for control over
 // xds client.
-func setupTests(t *testing.T) (*clusterHandler, *fakeclient.Client) {
+func setupTests() (*clusterHandler, *fakeclient.Client) {
 	xdsC := fakeclient.NewClient()
 	ch := newClusterHandler(&cdsBalancer{xdsClient: xdsC})
 	return ch, xdsC
@@ -83,7 +83,7 @@ func (s) TestSuccessCaseLeafNode(t *testing.T) {
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			ch, fakeClient := setupTests(t)
+			ch, fakeClient := setupTests()
 			// When you first update the root cluster, it should hit the code
 			// path which will start a cluster node for that root. Updating the
 			// root cluster logically represents a ping from a ClientConn.
@@ -170,7 +170,7 @@ func (s) TestSuccessCaseLeafNodeThenNewUpdate(t *testing.T) {
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			ch, fakeClient := setupTests(t)
+			ch, fakeClient := setupTests()
 			ch.updateRootCluster(test.clusterName)
 			ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 			defer ctxCancel()
@@ -220,7 +220,7 @@ func (s) TestSuccessCaseLeafNodeThenNewUpdate(t *testing.T) {
 // the children, and at the end there should be a successful clusterUpdate
 // written to the update buffer to send back to CDS.
 func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) {
-	ch, fakeClient := setupTests(t)
+	ch, fakeClient := setupTests()
 	ch.updateRootCluster(aggregateClusterService)
 
 	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
@@ -342,7 +342,7 @@ func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) {
 	// This initial code is the same as the test for the aggregate success case,
 	// except without validations. This will get this test to the point where it
 	// can change one of the children.
-	ch, fakeClient := setupTests(t)
+	ch, fakeClient := setupTests()
 	ch.updateRootCluster(aggregateClusterService)
 
 	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
@@ -449,7 +449,7 @@ func (s) TestUpdateRootClusterAggregateThenChangeRootToEDS(t *testing.T) {
 	// This initial code is the same as the test for the aggregate success case,
 	// except without validations. This will get this test to the point where it
 	// can update the root cluster to one of type EDS.
-	ch, fakeClient := setupTests(t)
+	ch, fakeClient := setupTests()
 	ch.updateRootCluster(aggregateClusterService)
 
 	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
@@ -528,7 +528,7 @@ func (s) TestUpdateRootClusterAggregateThenChangeRootToEDS(t *testing.T) {
 // TestHandleRespInvokedWithError tests that when handleResp is invoked with an
 // error, that the error is successfully written to the update buffer.
 func (s) TestHandleRespInvokedWithError(t *testing.T) {
-	ch, fakeClient := setupTests(t)
+	ch, fakeClient := setupTests()
 	ch.updateRootCluster(edsService)
 	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer ctxCancel()
@@ -555,7 +555,7 @@ func (s) TestHandleRespInvokedWithError(t *testing.T) {
 func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) {
 	// Getting the test to the point where there's a root cluster which is a eds
 	// leaf.
-	ch, fakeClient := setupTests(t)
+	ch, fakeClient := setupTests()
 	ch.updateRootCluster(edsService2)
 	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer ctxCancel()

View File

@@ -31,7 +31,7 @@ type controlPlane struct {
 	bootstrapContent string
 }
 
-func newControlPlane(testName string) (*controlPlane, error) {
+func newControlPlane() (*controlPlane, error) {
 	// Spin up an xDS management server on a local port.
 	server, err := e2e.StartManagementServer()
 	if err != nil {

View File

@@ -32,12 +32,12 @@ import (
 	testpb "google.golang.org/grpc/interop/grpc_testing"
 )
 
-func cmd(path string, logger io.Writer, args []string, env []string) (*exec.Cmd, error) {
+func cmd(path string, logger io.Writer, args []string, env []string) *exec.Cmd {
 	cmd := exec.Command(path, args...)
 	cmd.Env = append(os.Environ(), env...)
 	cmd.Stdout = logger
 	cmd.Stderr = logger
-	return cmd, nil
+	return cmd
 }
 
 const (
@@ -53,7 +53,7 @@ type client struct {
 
 // newClient create a client with the given target and bootstrap content.
 func newClient(target, binaryPath, bootstrap string, logger io.Writer, flags ...string) (*client, error) {
-	cmd, err := cmd(
+	cmd := cmd(
 		binaryPath,
 		logger,
 		append([]string{
@@ -68,9 +68,6 @@ func newClient(target, binaryPath, bootstrap string, logger io.Writer, flags ...
 			"GRPC_XDS_BOOTSTRAP_CONFIG=" + bootstrap, // The bootstrap content doesn't need to be quoted.
 		},
 	)
-	if err != nil {
-		return nil, fmt.Errorf("failed to run client cmd: %v", err)
-	}
 	cmd.Start()
 
 	cc, err := grpc.Dial(fmt.Sprintf("localhost:%d", clientStatsPort), grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)))
@@ -150,7 +147,7 @@ func newServers(hostnamePrefix, binaryPath, bootstrap string, logger io.Writer,
 	}()
 	for i := 0; i < count; i++ {
 		port := serverPort + i
-		cmd, err := cmd(
+		cmd := cmd(
 			binaryPath,
 			logger,
 			[]string{
@@ -163,9 +160,6 @@ func newServers(hostnamePrefix, binaryPath, bootstrap string, logger io.Writer,
 				"GRPC_XDS_BOOTSTRAP_CONFIG=" + bootstrap, // The bootstrap content doesn't need to be quoted.,
 			},
 		)
-		if err != nil {
-			return nil, fmt.Errorf("failed to run server cmd: %v", err)
-		}
 		cmd.Start()
 		ret = append(ret, &server{cmd: cmd, port: port})
 	}
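A side note on this change: after the fix, `cmd := cmd(...)` in newClient and newServers declares a variable that shadows the cmd function. That is legal Go, because the right-hand side is resolved before the new declaration takes effect; only from the next statement on does the name refer to the variable. A self-contained illustration with hypothetical names:

package main

import "fmt"

func double(n int) int { return 2 * n }

func main() {
	double := double(21) // the call still resolves to the function
	fmt.Println(double)  // 42; double(1) would not compile past this point
}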

View File

@@ -58,7 +58,7 @@ func setup(t *testing.T, opts testOpts) (*controlPlane, *client, []*server) {
 		backendCount = opts.backendCount
 	}
 
-	cp, err := newControlPlane(opts.testName)
+	cp, err := newControlPlane()
 	if err != nil {
 		t.Fatalf("failed to start control-plane: %v", err)
 	}