Run `go vet` as part of the Travis CI build. (#626)

* Run `go vet` as part of the Travis CI build.

Also fix existing issues found by `go vet`.

* Explicitly check for shadowing

* Fix shadowing problems throughout codebase

* Actually run all checks, including shadow
Ajay Gopinathan authored 2019-01-07 18:21:44 -08:00, committed by Kubernetes Prow Robot
parent be6664445b
commit 67a7373933
10 changed files with 42 additions and 42 deletions
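
For context on the shadow fixes below: the shadow check flags a `:=` that declares a new variable with the same name as one in an enclosing scope, so code that looks like it updates the outer variable actually writes to a short-lived copy. (With the Go 1.11 toolchain pinned in this build, the check runs via `go vet -all -shadow`; later Go releases ship shadow as a separate analyzer.) A minimal sketch of the bug class, with hypothetical names rather than code from this commit:

package main

import "fmt"

func lookup(key string) (string, error) {
    if key == "" {
        return "", fmt.Errorf("empty key")
    }
    return "value-for-" + key, nil
}

func main() {
    var err error
    // shadow flags this: `:=` declares a NEW err that hides the outer
    // one, so the outer err is never assigned.
    if v, err := lookup(""); err == nil {
        fmt.Println(v)
    }
    if err != nil { // always nil here; the inner err never escaped
        fmt.Println("lookup failed:", err)
    }
}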


@@ -48,4 +48,5 @@ script:
 - cd $TRAVIS_BUILD_DIR/backend/src
 - gimme -f 1.11.4
 - source ~/.gimme/envs/go1.11.4.env
+- go vet -all -shadow ./...
 - go test ./...


@@ -118,12 +118,12 @@ func TestReportMetrics_Succeed(t *testing.T) {
         &api.RunMetric{
             Name:   "accuracy",
             NodeId: "node-1",
-            Value:  &api.RunMetric_NumberValue{0.77},
+            Value:  &api.RunMetric_NumberValue{NumberValue: 0.77},
         },
         &api.RunMetric{
             Name:   "logloss",
             NodeId: "node-1",
-            Value:  &api.RunMetric_NumberValue{1.2},
+            Value:  &api.RunMetric_NumberValue{NumberValue: 1.2},
         },
     },
 }
@@ -301,12 +301,12 @@ func TestReportMetrics_InvalidMetricsJSON_PartialFail(t *testing.T) {
         &api.RunMetric{
             Name:   "accuracy",
             NodeId: "node-2",
-            Value:  &api.RunMetric_NumberValue{0.77},
+            Value:  &api.RunMetric_NumberValue{NumberValue: 0.77},
         },
         &api.RunMetric{
             Name:   "logloss",
             NodeId: "node-2",
-            Value:  &api.RunMetric_NumberValue{1.2},
+            Value:  &api.RunMetric_NumberValue{NumberValue: 1.2},
         },
     },
 }
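
Both hunks above fix the same warning from vet's composites check: `api.RunMetric_NumberValue` lives in another (generated) package, and an unkeyed literal like `{0.77}` would silently change meaning if the generated struct ever gained or reordered fields. A small stand-alone illustration; `NumberValue` here is a hypothetical stand-in for the generated wrapper:

package main

import "fmt"

// Hypothetical stand-in for the generated api.RunMetric_NumberValue.
type NumberValue struct {
    Value float64
}

func main() {
    // Unkeyed: vet's composites check flags this form for struct types
    // imported from other packages.
    unkeyed := &NumberValue{0.77}

    // Keyed: the value stays bound to its field name even if the type
    // is later regenerated with more fields.
    keyed := &NumberValue{Value: 0.77}

    fmt.Println(unkeyed.Value, keyed.Value)
}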


@@ -27,7 +27,6 @@ import (
 // creates a new client for the Kubernetes ScheduledWorkflow CRD.
 func CreateScheduledWorkflowClientOrFatal(namespace string, initConnectionTimeout time.Duration) v1alpha1.ScheduledWorkflowInterface {
     var swfClient v1alpha1.ScheduledWorkflowInterface
-    var err error
     var operation = func() error {
         restConfig, err := rest.InClusterConfig()
         if err != nil {
@@ -37,12 +36,12 @@ func CreateScheduledWorkflowClientOrFatal(namespace string, initConnectionTimeou
         swfClient = swfClientSet.ScheduledworkflowV1alpha1().ScheduledWorkflows(namespace)
         return nil
     }
     b := backoff.NewExponentialBackOff()
     b.MaxElapsedTime = initConnectionTimeout
-    err = backoff.Retry(operation, b)
-    if err != nil {
+    if err := backoff.Retry(operation, b); err != nil {
         glog.Fatalf("Failed to create scheduled workflow client. Error: %v", err)
     }
     return swfClient
 }
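
Two things happen in this file: the function-scoped `var err error` (which the closure's own `restConfig, err := ...` was shadowing) goes away, and the retry result is checked with the `if err := f(); err != nil` form, which scopes the error to the single statement that produces it. A sketch of the idiom under assumed names:

package main

import (
    "errors"
    "log"
)

func doWork() error { return errors.New("boom") }

func main() {
    // err exists only inside this if statement, so no outer variable
    // is left around for a later `x, err := ...` to shadow.
    if err := doWork(); err != nil {
        log.Printf("doWork failed: %v", err)
    }
}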


@@ -151,23 +151,23 @@ func loadSamples(resourceManager *resource.ResourceManager) error {
         File string
     }
     var configs []config
-    if err := json.Unmarshal(configBytes, &configs); err != nil {
-        return errors.New(fmt.Sprintf("Failed to read sample configurations. Err: %v", err.Error()))
+    if err = json.Unmarshal(configBytes, &configs); err != nil {
+        return errors.New(fmt.Sprintf("Failed to read sample configurations. Err: %v", err))
     }
     for _, config := range configs {
-        reader, err := os.Open(config.File)
-        if err != nil {
-            return errors.New(fmt.Sprintf("Failed to load sample %s. Error: %v", config.Name, err.Error()))
+        reader, configErr := os.Open(config.File)
+        if configErr != nil {
+            return errors.New(fmt.Sprintf("Failed to load sample %s. Error: %v", config.Name, configErr))
         }
-        pipelineFile, err := server.ReadPipelineFile(config.File, reader, server.MaxFileLength)
-        if err != nil {
-            return errors.New(fmt.Sprintf("Failed to decompress the file %s. Error: %v", config.Name, err.Error()))
+        pipelineFile, configErr := server.ReadPipelineFile(config.File, reader, server.MaxFileLength)
+        if configErr != nil {
+            return errors.New(fmt.Sprintf("Failed to decompress the file %s. Error: %v", config.Name, configErr))
         }
-        _, err = resourceManager.CreatePipeline(config.Name, config.Description, pipelineFile)
-        if err != nil {
+        _, configErr = resourceManager.CreatePipeline(config.Name, config.Description, pipelineFile)
+        if configErr != nil {
             // Log the error but not fail. The API Server pod can restart and it could potentially cause name collision.
             // In the future, we might consider loading samples during deployment, instead of when API server starts.
-            glog.Warningf(fmt.Sprintf("Failed to create pipeline for %s. Error: %v", config.Name, err.Error()))
+            glog.Warningf(fmt.Sprintf("Failed to create pipeline for %s. Error: %v", config.Name, configErr))
             continue
         }


@@ -194,15 +194,14 @@ func (r *ResourceManager) CreateRun(apiRun *api.Run) (*model.RunDetail, error) {
         return nil, util.Wrap(err, "Failed to fetch workflow spec.")
     }
     var workflow util.Workflow
-    err = json.Unmarshal(workflowSpecManifestBytes, &workflow)
-    if err != nil {
+    if err = json.Unmarshal(workflowSpecManifestBytes, &workflow); err != nil {
         return nil, util.NewInternalServerError(err,
             "Failed to unmarshal workflow spec manifest. Workflow bytes: %s", string(workflowSpecManifestBytes))
     }
     parameters := toParametersMap(apiRun.GetPipelineSpec().GetParameters())
     // Verify no additional parameter provided
-    if err := workflow.VerifyParameters(parameters); err != nil {
+    if err = workflow.VerifyParameters(parameters); err != nil {
         return nil, util.Wrap(err, "Failed to verify parameters.")
     }
     // Append provided parameter


@@ -115,13 +115,13 @@ func TestToApiRuns(t *testing.T) {
     apiMetric1 := &api.RunMetric{
         Name:   metric1.Name,
         NodeId: metric1.NodeID,
-        Value:  &api.RunMetric_NumberValue{metric1.NumberValue},
+        Value:  &api.RunMetric_NumberValue{NumberValue: metric1.NumberValue},
         Format: api.RunMetric_RAW,
     }
     apiMetric2 := &api.RunMetric{
         Name:   metric2.Name,
         NodeId: metric2.NodeID,
-        Value:  &api.RunMetric_NumberValue{metric2.NumberValue},
+        Value:  &api.RunMetric_NumberValue{NumberValue: metric2.NumberValue},
         Format: api.RunMetric_PERCENTAGE,
     }
     modelRun1 := model.Run{


@@ -52,15 +52,16 @@ func (s *DBStatusStore) InitializeDBStatusTable() error {
     // The table is not initialized
     if !rows.Next() {
-        sql, args, err := sq.
+        sql, args, queryErr := sq.
             Insert("db_statuses").
             SetMap(defaultDBStatus).
             ToSql()
-        if err != nil {
+        if queryErr != nil {
             tx.Rollback()
-            return util.NewInternalServerError(err, "Error creating query to initialize database status table.")
+            return util.NewInternalServerError(queryErr, "Error creating query to initialize database status table.")
         }
         _, err = tx.Exec(sql, args...)
         if err != nil {
             tx.Rollback()


@@ -41,22 +41,22 @@ func NewRootCmd(factory ClientFactoryInterface) *RootCommand {
         pipelineClient, err := factory.CreatePipelineClient(root.ClientConfig(), root.debug)
         if err != nil {
-            fmt.Errorf("Could not create pipelineClient: %v", err)
+            return fmt.Errorf("Could not create pipelineClient: %v", err)
         }
         jobClient, err := factory.CreateJobClient(root.ClientConfig(), root.debug)
         if err != nil {
-            fmt.Errorf("Could not create jobClient: %v", err)
+            return fmt.Errorf("Could not create jobClient: %v", err)
         }
         runClient, err := factory.CreateRunClient(root.ClientConfig(), root.debug)
         if err != nil {
-            fmt.Errorf("Could not create runClient: %v", err)
+            return fmt.Errorf("Could not create runClient: %v", err)
         }
         experimentClient, err := factory.CreateExperimentClient(root.ClientConfig(), root.debug)
         if err != nil {
-            fmt.Errorf("Could not create experimentClient: %v", err)
+            return fmt.Errorf("Could not create experimentClient: %v", err)
         }
         root.pipelineUploadClient = pipelineUploadClient
         root.pipelineClient = pipelineClient
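
This hunk fixes a genuine bug surfaced by vet's unusedresult check: `fmt.Errorf` only constructs an error value, so calling it without returning or assigning the result neither prints nor propagates anything, and every client-creation failure here was being silently ignored. A distilled before/after, with hypothetical helper names:

package main

import "fmt"

func connect() error { return fmt.Errorf("connection refused") }

func setup() error {
    if err := connect(); err != nil {
        // Buggy form (what unusedresult catches): the error is built
        // and immediately discarded, so execution falls through.
        //   fmt.Errorf("could not connect: %v", err)

        // Fixed form: actually return it.
        return fmt.Errorf("could not connect: %v", err)
    }
    return nil
}

func main() {
    fmt.Println(setup())
}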


@@ -262,28 +262,28 @@ func ToGRPCError(err error) error {
     case *UserError:
         userError := err.(*UserError)
         stat := status.New(userError.externalStatusCode, userError.internalError.Error())
-        statWithDetail, err := stat.
+        statWithDetail, statErr := stat.
             WithDetails(&api.Error{
                 ErrorMessage: userError.externalMessage,
                 ErrorDetails: userError.internalError.Error(),
             })
-        if err != nil {
+        if statErr != nil {
             // Failed to stream error message as proto.
             glog.Errorf("Failed to stream gRpc error. Error to be streamed: %v Error: %v",
-                userError.String(), err.Error())
+                userError.String(), statErr)
             return stat.Err()
         }
         return statWithDetail.Err()
     default:
         externalMessage := fmt.Sprintf("Internal error: %+v", err)
         stat := status.New(codes.Internal, externalMessage)
-        statWithDetail, err := stat.
+        statWithDetail, statErr := stat.
             WithDetails(&api.Error{ErrorMessage: externalMessage, ErrorDetails: externalMessage})
-        if err != nil {
+        if statErr != nil {
             // Failed to stream error message as proto.
             glog.Errorf("Failed to stream gRpc error. Error to be streamed: %v Error: %v",
-                externalMessage, err.Error())
+                externalMessage, statErr)
             return stat.Err()
         }
         return statWithDetail.Err()


@@ -68,7 +68,7 @@ type ScheduledWorkflowSpec struct {
     // Specification of the workflow to schedule.
     // +optional
-    Workflow *WorkflowResource `json:"workflow, omitempty"`
+    Workflow *WorkflowResource `json:"workflow,omitempty"`
     // TODO: support additional resource types: K8 jobs, etc.
@@ -189,13 +189,13 @@ type ScheduledWorkflowCondition struct {
 type TriggerStatus struct {
     // Time of the last creation of a workflow.
-    LastTriggeredTime *metav1.Time `json:lastTriggeredTime,omitempty`
+    LastTriggeredTime *metav1.Time `json:"lastTriggeredTime,omitempty"`
     // Time of the next creation of a workflow (assuming that the schedule is enabled).
-    NextTriggeredTime *metav1.Time `json:nextTriggeredTime,omitempty`
+    NextTriggeredTime *metav1.Time `json:"nextTriggeredTime,omitempty"`
     // Index of the last workflow created.
-    LastIndex *int64 `json:lastWorkflowIndex,omitempty`
+    LastIndex *int64 `json:"lastWorkflowIndex,omitempty"`
 }
 
 type WorkflowHistory struct {
@@ -220,7 +220,7 @@ type WorkflowStatus struct {
     UID types.UID `json:"uid,omitempty"`
     // Phase is a high level summary of the status of the workflow.
-    Phase v1alpha1.NodePhase `json:phase,omitempty`
+    Phase v1alpha1.NodePhase `json:"phase,omitempty"`
     // A human readable message indicating details about why the workflow is in
     // this condition.
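
These tag fixes come from vet's structtag check. Struct tags must follow the key:"value" convention exactly: an unquoted value such as `json:phase,omitempty` is unparseable, so encoding/json falls back to the Go field name and drops omitempty, while a stray space as in `json:"workflow, omitempty"` turns the option into " omitempty", which never matches. A short demonstration with stand-in fields:

package main

import (
    "encoding/json"
    "fmt"
)

type Status struct {
    // Unquoted value: reflect.StructTag cannot parse it, so the field
    // marshals under its Go name with no omitempty.
    Phase string `json:phase,omitempty`
    // Stray space: the option is " omitempty" and never matches.
    Workflow string `json:"workflow, omitempty"`
    // Well-formed tag for comparison.
    Message string `json:"message,omitempty"`
}

func main() {
    out, _ := json.Marshal(Status{})
    // Prints {"Phase":"","workflow":""}; only Message is omitted.
    fmt.Println(string(out))
}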