agent, control: update logger to plog

Gyu-Ho Lee 2016-10-31 21:05:58 -07:00
parent 1efae398f0
commit 3ceea9deb5
4 changed files with 73 additions and 73 deletions
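
For context: this commit renames the package-level capnslog logger from logger to plog, matching the naming convention used across the etcd/CoreOS codebases. Below is a minimal sketch of the pattern the new code relies on, as I understand the github.com/coreos/pkg/capnslog API; the port string and output writer are illustrative, not taken from this repository.

package agent

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

// plog is the package-level logger; the two strings only label emitted log lines.
var plog = capnslog.NewPackageLogger("github.com/coreos/dbtester", "agent")

func exampleLogging() {
	// Route capnslog output through a human-readable formatter (os.Stderr is illustrative).
	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, false))

	plog.Infof("started serving gRPC %s", ":3500") // format-string variant
	plog.Error("shutting down")                    // print-style variant
}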

View File

@@ -133,7 +133,7 @@ func CommandFunc(cmd *cobra.Command, args []string) error {
capnslog.SetFormatter(capnslog.NewPrettyFormatter(f, false))
logger.Infof("started serving gRPC %s", globalFlags.GRPCPort)
plog.Infof("started serving gRPC %s", globalFlags.GRPCPort)
var (
grpcServer = grpc.NewServer()
@@ -180,14 +180,14 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
monitorLogPath = filepath.Join(globalFlags.WorkingDirectory, monitorLogPath)
}
logger.Info("received gRPC request")
logger.Infof("working_directory: %q", globalFlags.WorkingDirectory)
logger.Infof("working_directory_zookeeper: %q", zkWorkingDir)
logger.Infof("data_directory_etcd: %q", etcdDataDir)
logger.Infof("data_directory_consul: %q", consulDataDir)
logger.Infof("data_directory_zookeeper: %q", zkDataDir)
logger.Infof("database_log_path: %q", databaseLogPath)
logger.Infof("monitor_log_path: %q", monitorLogPath)
plog.Info("received gRPC request")
plog.Infof("working_directory: %q", globalFlags.WorkingDirectory)
plog.Infof("working_directory_zookeeper: %q", zkWorkingDir)
plog.Infof("data_directory_etcd: %q", etcdDataDir)
plog.Infof("data_directory_consul: %q", consulDataDir)
plog.Infof("data_directory_zookeeper: %q", zkDataDir)
plog.Infof("database_log_path: %q", databaseLogPath)
plog.Infof("monitor_log_path: %q", monitorLogPath)
}
if r.Operation == Request_Start {
t.req = *r
@@ -257,20 +257,20 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
cmd.Stderr = f
cmdString := fmt.Sprintf("%s %s", cmd.Path, flagString)
logger.Infof("starting binary %q", cmdString)
plog.Infof("starting binary %q", cmdString)
if err := cmd.Start(); err != nil {
return nil, err
}
t.cmd = cmd
t.pid = cmd.Process.Pid
logger.Infof("started binary %q [PID: %d]", cmdString, t.pid)
plog.Infof("started binary %q [PID: %d]", cmdString, t.pid)
processPID = t.pid
go func() {
if err := cmd.Wait(); err != nil {
logger.Error("cmd.Wait %q returned error %v", cmdString, err)
plog.Error("cmd.Wait %q returned error %v", cmdString, err)
return
}
logger.Infof("exiting %q", cmdString)
plog.Infof("exiting %q", cmdString)
}()
case Request_ZooKeeper:
@@ -279,18 +279,18 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
return nil, err
}
logger.Infof("os.Chdir %q", zkWorkingDir)
plog.Infof("os.Chdir %q", zkWorkingDir)
if err := os.Chdir(zkWorkingDir); err != nil {
return nil, err
}
logger.Infof("os.MkdirAll %q", zkDataDir)
plog.Infof("os.MkdirAll %q", zkDataDir)
if err := os.MkdirAll(zkDataDir, 0777); err != nil {
return nil, err
}
idFilePath := filepath.Join(zkDataDir, "myid")
logger.Infof("writing zk myid file %d in %s", t.req.ZookeeperMyID, idFilePath)
plog.Infof("writing zk myid file %d in %s", t.req.ZookeeperMyID, idFilePath)
if err := toFile(fmt.Sprintf("%d", t.req.ZookeeperMyID), idFilePath); err != nil {
return nil, err
}
@@ -313,7 +313,7 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
zc := buf.String()
configFilePath := filepath.Join(zkWorkingDir, zkConfigPath)
logger.Infof("writing zk config file %q (config %q)", configFilePath, zc)
plog.Infof("writing zk config file %q (config %q)", configFilePath, zc)
if err := toFile(zc, configFilePath); err != nil {
return nil, err
}
@@ -333,20 +333,20 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
cmd.Stderr = f
cmdString := fmt.Sprintf("%s %s", cmd.Path, strings.Join(args[1:], " "))
logger.Infof("starting binary %q", cmdString)
plog.Infof("starting binary %q", cmdString)
if err := cmd.Start(); err != nil {
return nil, err
}
t.cmd = cmd
t.pid = cmd.Process.Pid
logger.Infof("started binary %q [PID: %d]", cmdString, t.pid)
plog.Infof("started binary %q [PID: %d]", cmdString, t.pid)
processPID = t.pid
go func() {
if err := cmd.Wait(); err != nil {
logger.Error("cmd.Wait returned error", cmdString, err)
plog.Error("cmd.Wait returned error", cmdString, err)
return
}
logger.Infof("exiting %q (%v)", cmdString, err)
plog.Infof("exiting %q (%v)", cmdString, err)
}()
case Request_Consul:
@@ -392,20 +392,20 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
cmd.Stderr = f
cmdString := fmt.Sprintf("%s %s", cmd.Path, flagString)
logger.Infof("starting binary %q", cmdString)
plog.Infof("starting binary %q", cmdString)
if err := cmd.Start(); err != nil {
return nil, err
}
t.cmd = cmd
t.pid = cmd.Process.Pid
logger.Infof("started binary %q [PID: %d]", cmdString, t.pid)
plog.Infof("started binary %q [PID: %d]", cmdString, t.pid)
processPID = t.pid
go func() {
if err := cmd.Wait(); err != nil {
logger.Error("cmd.Wait returned error", cmdString, err)
plog.Error("cmd.Wait returned error", cmdString, err)
return
}
logger.Infof("exiting", cmdString, err)
plog.Infof("exiting", cmdString, err)
}()
default:
@@ -417,7 +417,7 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
return nil, fmt.Errorf("nil command")
}
logger.Infof("restarting database %q", t.req.Database.String())
plog.Infof("restarting database %q", t.req.Database.String())
if r.Database == Request_ZooKeeper {
if err := os.Chdir(zkWorkingDir); err != nil {
return nil, err
@@ -435,20 +435,20 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
cmd.Stderr = f
cmdString := strings.Join(t.cmd.Args, " ")
logger.Infof("restarting binary %q", cmdString)
plog.Infof("restarting binary %q", cmdString)
if err := cmd.Start(); err != nil {
return nil, err
}
t.cmd = cmd
t.pid = cmd.Process.Pid
logger.Infof("restarted binary %q [PID: %d]", cmdString, t.pid)
plog.Infof("restarted binary %q [PID: %d]", cmdString, t.pid)
processPID = t.pid
go func() {
if err := cmd.Wait(); err != nil {
logger.Errorf("cmd.Wait %q returned error (%v)", cmdString, err)
plog.Errorf("cmd.Wait %q returned error (%v)", cmdString, err)
return
}
logger.Infof("exiting %q", cmdString)
plog.Infof("exiting %q", cmdString)
}()
case Request_Stop:
@@ -456,14 +456,14 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
if t.cmd == nil {
return nil, fmt.Errorf("nil command")
}
logger.Infof("stopping binary %q [PID: %d]", t.req.Database.String(), t.pid)
plog.Infof("stopping binary %q [PID: %d]", t.req.Database.String(), t.pid)
if err := syscall.Kill(t.pid, syscall.SIGTERM); err != nil {
return nil, err
}
if t.logfile != nil {
t.logfile.Close()
}
logger.Infof("stopped binary %q [PID: %d]", t.req.Database.String(), t.pid)
plog.Infof("stopped binary %q [PID: %d]", t.req.Database.String(), t.pid)
processPID = t.pid
databaseStopped <- struct{}{}
@@ -491,10 +491,10 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
return process.WriteToCSV(f, pss...)
}
logger.Infof("saving monitoring results for %q in %q", t.req.Database.String(), monitorLogPath)
plog.Infof("saving monitoring results for %q in %q", t.req.Database.String(), monitorLogPath)
var err error
if err = rFunc(); err != nil {
logger.Errorf("monitoring error (%v)", err)
plog.Errorf("monitoring error (%v)", err)
return
}
@@ -502,19 +502,19 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
select {
case <-time.After(time.Second):
if err = rFunc(); err != nil {
logger.Errorf("monitoring error (%v)", err)
plog.Errorf("monitoring error (%v)", err)
continue
}
case sig := <-notifier:
logger.Infof("signal received %q", sig.String())
plog.Infof("signal received %q", sig.String())
return
case <-databaseStopped:
logger.Infof("stopped monitoring, uploading to storage %q", t.req.GoogleCloudProjectName)
plog.Infof("stopped monitoring, uploading to storage %q", t.req.GoogleCloudProjectName)
u, err := remotestorage.NewGoogleCloudStorage([]byte(t.req.GoogleCloudStorageKey), t.req.GoogleCloudProjectName)
if err != nil {
logger.Errorf("remotestorage.NewGoogleCloudStorage error (%v)", err)
plog.Errorf("remotestorage.NewGoogleCloudStorage error (%v)", err)
return
}
@@ -526,11 +526,11 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
}
dstDatabaseLogPath = filepath.Join(t.req.GoogleCloudStorageSubDirectory, dstDatabaseLogPath)
logger.Infof("uploading database log [%q -> %q]", srcDatabaseLogPath, dstDatabaseLogPath)
plog.Infof("uploading database log [%q -> %q]", srcDatabaseLogPath, dstDatabaseLogPath)
var uerr error
for k := 0; k < 30; k++ {
if uerr = u.UploadFile(t.req.GoogleCloudStorageBucketName, srcDatabaseLogPath, dstDatabaseLogPath); uerr != nil {
logger.Errorf("u.UploadFile error... sleep and retry... (%v)", uerr)
plog.Errorf("u.UploadFile error... sleep and retry... (%v)", uerr)
time.Sleep(2 * time.Second)
continue
} else {
@@ -545,10 +545,10 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
}
dstMonitorResultPath = filepath.Join(t.req.GoogleCloudStorageSubDirectory, dstMonitorResultPath)
logger.Infof("uploading monitor results [%q -> %q]", srcMonitorResultPath, dstMonitorResultPath)
plog.Infof("uploading monitor results [%q -> %q]", srcMonitorResultPath, dstMonitorResultPath)
for k := 0; k < 30; k++ {
if uerr = u.UploadFile(t.req.GoogleCloudStorageBucketName, srcMonitorResultPath, dstMonitorResultPath); uerr != nil {
logger.Errorf("u.UploadFile error... sleep and retry... (%v)", uerr)
plog.Errorf("u.UploadFile error... sleep and retry... (%v)", uerr)
time.Sleep(2 * time.Second)
continue
} else {
@@ -563,10 +563,10 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
}
dstAgentLogPath = filepath.Join(t.req.GoogleCloudStorageSubDirectory, dstAgentLogPath)
logger.Infof("uploading agent logs [%q -> %q]", srcAgentLogPath, dstAgentLogPath)
plog.Infof("uploading agent logs [%q -> %q]", srcAgentLogPath, dstAgentLogPath)
for k := 0; k < 30; k++ {
if uerr = u.UploadFile(t.req.GoogleCloudStorageBucketName, srcAgentLogPath, dstAgentLogPath); uerr != nil {
logger.Error("u.UploadFile error... sleep and retry... (%v)", uerr)
plog.Error("u.UploadFile error... sleep and retry... (%v)", uerr)
time.Sleep(2 * time.Second)
continue
} else {
@@ -580,7 +580,7 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
}(processPID)
}
logger.Info("transfer success")
plog.Info("transfer success")
return &Response{Success: true}, nil
}
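
The upload steps above retry u.UploadFile up to 30 times, sleeping two seconds between attempts before giving up. A self-contained sketch of that retry shape follows; the uploadFunc type and helper name are hypothetical, not part of dbtester.

package main

import (
	"fmt"
	"time"
)

// uploadFunc stands in for a call such as u.UploadFile(bucket, src, dst).
type uploadFunc func() error

// uploadWithRetry mirrors the loops in Transfer: attempt, log, sleep, and retry a bounded number of times.
func uploadWithRetry(up uploadFunc, attempts int, pause time.Duration) error {
	var err error
	for k := 0; k < attempts; k++ {
		if err = up(); err != nil {
			fmt.Printf("upload error... sleep and retry... (%v)\n", err)
			time.Sleep(pause)
			continue
		}
		return nil
	}
	return err // last error after exhausting all attempts
}

func main() {
	calls := 0
	// Fails twice, then succeeds; with 30 attempts the helper returns nil.
	err := uploadWithRetry(func() error {
		calls++
		if calls < 3 {
			return fmt.Errorf("transient failure %d", calls)
		}
		return nil
	}, 30, 2*time.Second)
	fmt.Println("final error:", err)
}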

View File

@@ -16,4 +16,4 @@ package agent
import "github.com/coreos/pkg/capnslog"
var logger = capnslog.NewPackageLogger("github.com/coreos/dbtester", "agent")
var plog = capnslog.NewPackageLogger("github.com/coreos/dbtester", "agent")

View File

@@ -86,7 +86,7 @@ func CommandFunc(cmd *cobra.Command, args []string) error {
println()
if !cfg.Step1.Skip {
logger.Info("step 1: starting databases...")
plog.Info("step 1: starting databases...")
if err = step1(cfg); err != nil {
return err
}
@@ -95,7 +95,7 @@ func CommandFunc(cmd *cobra.Command, args []string) error {
if !cfg.Step2.Skip {
println()
time.Sleep(5 * time.Second)
logger.Info("step 2: starting tests...")
plog.Info("step 2: starting tests...")
if err = step2(cfg); err != nil {
return err
}
@@ -104,7 +104,7 @@ func CommandFunc(cmd *cobra.Command, args []string) error {
if !cfg.Step3.Skip {
println()
time.Sleep(5 * time.Second)
logger.Info("step 3: stopping databases...")
plog.Info("step 3: stopping databases...")
if err = step3(cfg); err != nil {
return err
}
@@ -206,7 +206,7 @@ func step2(cfg Config) error {
}
for k, v := range totalKeysFunc(cfg.DatabaseEndpoints) {
logger.Infof("expected write total results [expected_total: %d | database: %q | endpoint: %q | number_of_keys: %d]", cfg.Step2.TotalRequests, cfg.Database, k, v)
plog.Infof("expected write total results [expected_total: %d | database: %q | endpoint: %q | number_of_keys: %d]", cfg.Step2.TotalRequests, cfg.Database, k, v)
}
case "read":
@@ -214,7 +214,7 @@ func step2(cfg Config) error {
switch cfg.Database {
case "etcdv2":
logger.Infof("write started [request: PUT | key: %q | database: %q]", key, "etcdv2")
plog.Infof("write started [request: PUT | key: %q | database: %q]", key, "etcdv2")
var err error
for i := 0; i < 7; i++ {
clients := mustCreateClientsEtcdv2(cfg.DatabaseEndpoints, cfg.Step2.Connections)
@@ -222,16 +222,16 @@ func step2(cfg Config) error {
if err != nil {
continue
}
logger.Infof("write done [request: PUT | key: %q | database: %q]", key, "etcdv2")
plog.Infof("write done [request: PUT | key: %q | database: %q]", key, "etcdv2")
break
}
if err != nil {
logger.Errorf("write error [request: PUT | key: %q | database: %q]", key, "etcdv2")
plog.Errorf("write error [request: PUT | key: %q | database: %q]", key, "etcdv2")
os.Exit(1)
}
case "etcdv3":
logger.Infof("write started [request: PUT | key: %q | database: %q]", key, "etcdv3")
plog.Infof("write started [request: PUT | key: %q | database: %q]", key, "etcdv3")
var err error
for i := 0; i < 7; i++ {
clients := mustCreateClientsEtcdv3(cfg.DatabaseEndpoints, etcdv3ClientCfg{
@@ -243,16 +243,16 @@ func step2(cfg Config) error {
if err != nil {
continue
}
logger.Infof("write done [request: PUT | key: %q | database: %q]", key, "etcdv3")
plog.Infof("write done [request: PUT | key: %q | database: %q]", key, "etcdv3")
break
}
if err != nil {
logger.Errorf("write error [request: PUT | key: %q | database: %q]", key, "etcdv3")
plog.Errorf("write error [request: PUT | key: %q | database: %q]", key, "etcdv3")
os.Exit(1)
}
case "zk", "zookeeper":
logger.Infof("write started [request: PUT | key: %q | database: %q]", key, "zookeeper")
plog.Infof("write started [request: PUT | key: %q | database: %q]", key, "zookeeper")
var err error
for i := 0; i < 7; i++ {
conns := mustCreateConnsZk(cfg.DatabaseEndpoints, cfg.Step2.Connections)
@@ -263,16 +263,16 @@ func step2(cfg Config) error {
for j := range conns {
conns[j].Close()
}
logger.Infof("write done [request: PUT | key: %q | database: %q]", key, "zookeeper")
plog.Infof("write done [request: PUT | key: %q | database: %q]", key, "zookeeper")
break
}
if err != nil {
logger.Errorf("write error [request: PUT | key: %q | database: %q]", key, "zookeeper")
plog.Errorf("write error [request: PUT | key: %q | database: %q]", key, "zookeeper")
os.Exit(1)
}
case "consul":
logger.Infof("write started [request: PUT | key: %q | database: %q]", key, "consul")
plog.Infof("write started [request: PUT | key: %q | database: %q]", key, "consul")
var err error
for i := 0; i < 7; i++ {
clients := mustCreateConnsConsul(cfg.DatabaseEndpoints, cfg.Step2.Connections)
@@ -280,11 +280,11 @@ func step2(cfg Config) error {
if err != nil {
continue
}
logger.Infof("write done [request: PUT | key: %q | database: %q]", key, "consul")
plog.Infof("write done [request: PUT | key: %q | database: %q]", key, "consul")
break
}
if err != nil {
logger.Errorf("write done [request: PUT | key: %q | database: %q]", key, "consul")
plog.Errorf("write done [request: PUT | key: %q | database: %q]", key, "consul")
os.Exit(1)
}
}
@@ -297,7 +297,7 @@ func step2(cfg Config) error {
generateReport(cfg, h, reqGen)
case "read-oneshot":
key, value := sameKey(cfg.Step2.KeySize), vals.strings[0]
logger.Infof("writing key for read-oneshot [key: %q | database: %q]", key, cfg.Database)
plog.Infof("writing key for read-oneshot [key: %q | database: %q]", key, cfg.Database)
var err error
switch cfg.Database {
case "etcdv2":
@@ -319,7 +319,7 @@ func step2(cfg Config) error {
_, err = clients[0].Put(&consulapi.KVPair{Key: key, Value: vals.bytes[0]}, nil)
}
if err != nil {
logger.Errorf("write error on read-oneshot (%v)", err)
plog.Errorf("write error on read-oneshot (%v)", err)
os.Exit(1)
}
@@ -368,11 +368,11 @@ func sendReq(ep string, req agent.Request, i int) error {
req.ServerIndex = uint32(i)
req.ZookeeperMyID = uint32(i + 1)
logger.Infof("sending message [index: %d | operation: %q | database: %q | endpoint: %q]", i, req.Operation.String(), req.Database.String(), ep)
plog.Infof("sending message [index: %d | operation: %q | database: %q | endpoint: %q]", i, req.Operation.String(), req.Database.String(), ep)
conn, err := grpc.Dial(ep, grpc.WithInsecure())
if err != nil {
logger.Errorf("grpc.Dial connecting error (%v) [index: %d | endpoint: %q]", err, i, ep)
plog.Errorf("grpc.Dial connecting error (%v) [index: %d | endpoint: %q]", err, i, ep)
return err
}
@@ -383,11 +383,11 @@ func sendReq(ep string, req agent.Request, i int) error {
resp, err := cli.Transfer(ctx, &req)
cancel()
if err != nil {
logger.Errorf("cli.Transfer error (%v) [index: %d | endpoint: %q]", err, i, ep)
plog.Errorf("cli.Transfer error (%v) [index: %d | endpoint: %q]", err, i, ep)
return err
}
logger.Infof("got response [index: %d | endpoint: %q | response: %+v]", i, ep, resp)
plog.Infof("got response [index: %d | endpoint: %q | response: %+v]", i, ep, resp)
return nil
}
@@ -458,7 +458,7 @@ func newWriteHandlers(cfg Config) (rhs []ReqHandler, done func()) {
if cfg.Step2.SameKey {
key := sameKey(cfg.Step2.KeySize)
valueBts := randBytes(cfg.Step2.ValueSize)
logger.Infof("write started [request: PUT | key: %q | database: %q]", key, "zookeeper")
plog.Infof("write started [request: PUT | key: %q | database: %q]", key, "zookeeper")
var err error
for i := 0; i < 7; i++ {
conns := mustCreateConnsZk(cfg.DatabaseEndpoints, cfg.Step2.Connections)
@@ -469,11 +469,11 @@ func newWriteHandlers(cfg Config) (rhs []ReqHandler, done func()) {
for j := range conns {
conns[j].Close()
}
logger.Infof("write done [request: PUT | key: %q | database: %q]", key, "zookeeper")
plog.Infof("write done [request: PUT | key: %q | database: %q]", key, "zookeeper")
break
}
if err != nil {
logger.Errorf("write error [request: PUT | key: %q | database: %q]", key, "zookeeper")
plog.Errorf("write error [request: PUT | key: %q | database: %q]", key, "zookeeper")
os.Exit(1)
}
}
@@ -578,7 +578,7 @@ func generateWrites(cfg Config, vals values, requests chan<- request) {
}()
for i := 0; i < cfg.Step2.TotalRequests; i++ {
if cfg.Database == "etcdv3" && cfg.Step2.Etcdv3CompactionCycle > 0 && i%cfg.Step2.Etcdv3CompactionCycle == 0 {
logger.Infof("starting compaction [index: %d | database: %q]", i, "etcdv3")
plog.Infof("starting compaction [index: %d | database: %q]", i, "etcdv3")
wg.Add(1)
go func() {
defer wg.Done()

View File

@@ -16,4 +16,4 @@ package control
import "github.com/coreos/pkg/capnslog"
var logger = capnslog.NewPackageLogger("github.com/coreos/dbtester", "control")
var plog = capnslog.NewPackageLogger("github.com/coreos/dbtester", "control")