mirror of https://github.com/docker/docs.git
commit ec7d343a88
@@ -354,7 +354,7 @@ func writeCorsHeaders(w http.ResponseWriter, r *http.Request) {
 }
 
 func httpError(w http.ResponseWriter, err string, status int) {
-	log.Error(err)
+	log.WithField("status", status).Errorf("HTTP error: %v", err)
 	http.Error(w, err, status)
 }
 
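The change above is representative of the whole commit: printf-style messages that interpolate values into the string are replaced with logrus structured fields, so the values survive as machine-readable keys. Below is a minimal, self-contained sketch of that pattern, not code from the repository: the import path and the sample field values are assumptions, and only the `log` alias and the call shapes come from the diff.

package main

import (
	"net/http"

	// Assumed import path; the diff only shows logrus used under the "log" alias.
	log "github.com/sirupsen/logrus"
)

// httpError mirrors the hunk above: the status code travels as a structured
// field instead of being baked into the message text.
func httpError(w http.ResponseWriter, err string, status int) {
	log.WithField("status", status).Errorf("HTTP error: %v", err)
	http.Error(w, err, status)
}

func main() {
	log.SetLevel(log.DebugLevel)

	// Single field.
	log.WithField("name", "consul").Debug("Discovery watch triggered")

	// Multiple fields.
	log.WithFields(log.Fields{
		"method": "GET",
		"route":  "/containers/json", // illustrative values, not from the diff
	}).Debug("Registering HTTP route")
}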
@@ -419,13 +419,13 @@ func createRouter(c *context, enableCors bool) *mux.Router {
 
 	for method, routes := range m {
 		for route, fct := range routes {
-			log.Debugf("Registering %s, %s", method, route)
+			log.WithFields(log.Fields{"method": method, "route": route}).Debug("Registering HTTP route")
 
 			// NOTE: scope issue, make sure the variables are local and won't be changed
 			localRoute := route
 			localFct := fct
 			wrap := func(w http.ResponseWriter, r *http.Request) {
-				log.Infof("%s %s", r.Method, r.RequestURI)
+				log.WithFields(log.Fields{"method": r.Method, "uri": r.RequestURI}).Info("HTTP request received")
				if enableCors {
					writeCorsHeaders(w, r)
				}
@@ -48,7 +48,7 @@ func ListenAndServe(c *cluster.Cluster, s *scheduler.Scheduler, hosts []string,
		}
 
		go func() {
-			log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1])
+			log.WithFields(log.Fields{"proto": protoAddrParts[0], "addr": protoAddrParts[1]}).Info("Listening for HTTP")
 
			var (
				l net.Listener
@@ -59,7 +59,7 @@ func proxy(tlsConfig *tls.Config, addr string, w http.ResponseWriter, r *http.Re
 	r.URL.Scheme = scheme
 	r.URL.Host = addr
 
-	log.Debugf("[PROXY] --> %s %s", r.Method, r.URL)
+	log.WithFields(log.Fields{"method": r.Method, "url": r.URL}).Debug("Proxy request")
 	resp, err := client.Do(r)
 	if err != nil {
 		return err
@@ -77,7 +77,7 @@ func hijack(tlsConfig *tls.Config, addr string, w http.ResponseWriter, r *http.R
 		addr = parts[1]
 	}
 
-	log.Debugf("[HIJACK PROXY] --> %s", addr)
+	log.WithField("addr", addr).Debug("Proxy hijack request")
 
 	var (
 		d net.Conn
@@ -166,7 +166,7 @@ func (n *Node) RefreshContainers(full bool) error {
 	for _, c := range containers {
 		merged, err = n.updateContainer(c, merged, full)
 		if err != nil {
-			log.Errorf("[%s/%s] Unable to update state of %s", n.ID, n.Name, c.Id)
+			log.WithFields(log.Fields{"name": n.Name, "id": n.ID}).Errorf("Unable to update state of container %q", c.Id)
 		}
 	}
 
@@ -174,7 +174,7 @@ func (n *Node) RefreshContainers(full bool) error {
 	defer n.Unlock()
 	n.containers = merged
 
-	log.Debugf("[%s/%s] Updated state", n.ID, n.Name)
+	log.WithFields(log.Fields{"id": n.ID, "name": n.Name}).Debugf("Updated node state")
 	return nil
 }
 
@@ -264,15 +264,15 @@ func (n *Node) refreshLoop() {
				n.emitEvent("node_disconnect")
			}
			n.healthy = false
-			log.Errorf("[%s/%s] Flagging node as dead. Updated state failed: %v", n.ID, n.Name, err)
+			log.WithFields(log.Fields{"name": n.Name, "id": n.ID}).Errorf("Flagging node as dead. Updated state failed: %v", err)
		} else {
			if !n.healthy {
-				log.Infof("[%s/%s] Node came back to life. Hooray!", n.ID, n.Name)
+				log.WithFields(log.Fields{"name": n.Name, "id": n.ID}).Info("Node came back to life. Hooray!")
				n.client.StopAllMonitorEvents()
				n.client.StartMonitorEvents(n.handler, nil)
				n.emitEvent("node_reconnect")
				if err := n.updateSpecs(); err != nil {
-					log.Errorf("[%s/%s] Update node specs failed: %v", n.ID, n.Name, err)
+					log.WithFields(log.Fields{"name": n.Name, "id": n.ID}).Errorf("Update node specs failed: %v", err)
				}
			}
			n.healthy = true
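Several of the node call sites above repeat the same `log.Fields{"name": n.Name, "id": n.ID}` pair. Purely as an illustration of what logrus allows, and not something this commit does, an entry can be built once and reused so each call site only supplies the message. The `Node` type and `logger()` helper below are hypothetical stand-ins:

package main

import (
	"errors"

	// Assumed import path, as in the earlier sketch.
	log "github.com/sirupsen/logrus"
)

// Node is a stand-in for the node type in the diff; only the fields used
// for logging are included here.
type Node struct {
	ID   string
	Name string
}

// logger returns an entry pre-populated with the node's identity.
func (n *Node) logger() *log.Entry {
	return log.WithFields(log.Fields{"id": n.ID, "name": n.Name})
}

func main() {
	n := &Node{ID: "abcdef123456", Name: "node-1"}

	n.logger().Info("Node came back to life. Hooray!")
	n.logger().Errorf("Update node specs failed: %v", errors.New("connection refused"))
}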
@@ -76,6 +76,7 @@ func (s *ConsulDiscoveryService) Fetch() ([]*discovery.Node, error) {
 
 func (s *ConsulDiscoveryService) Watch(callback discovery.WatchCallback) {
 	for _ = range s.waitForChange() {
+		log.WithField("name", "consul").Debug("Discovery watch triggered")
 		nodes, err := s.Fetch()
 		if err == nil {
 			callback(nodes)
@@ -100,7 +101,7 @@ func (s *ConsulDiscoveryService) waitForChange() <-chan uint64 {
				WaitTime: s.heartbeat}
			_, meta, err := kv.List(s.prefix, option)
			if err != nil {
-				log.Errorln(err)
+				log.WithField("name", "consul").Errorf("Discovery error: %v", err)
				break
			}
			s.lastIndex = meta.LastIndex
@@ -49,7 +49,7 @@ func Register(scheme string, d DiscoveryService) error {
 	if _, exists := discoveries[scheme]; exists {
 		return fmt.Errorf("scheme already registered %s", scheme)
 	}
-	log.Debugf("Registering %q discovery service", scheme)
+	log.WithField("name", scheme).Debug("Registering discovery service")
 	discoveries[scheme] = d
 
 	return nil
@@ -69,7 +69,7 @@ func New(rawurl string, heartbeat int) (DiscoveryService, error) {
 	scheme, uri := parse(rawurl)
 
 	if discovery, exists := discoveries[scheme]; exists {
-		log.Debugf("Initializing %q discovery service with %q", scheme, uri)
+		log.WithFields(log.Fields{"name": scheme, "uri": uri}).Debug("Initializing discovery service")
 		err := discovery.Initialize(uri, heartbeat)
 		return discovery, err
 	}
@@ -73,7 +73,7 @@ func (s *EtcdDiscoveryService) Watch(callback discovery.WatchCallback) {
 	watchChan := make(chan *etcd.Response)
 	go s.client.Watch(s.path, 0, true, watchChan, nil)
 	for _ = range watchChan {
-		log.Debugf("[ETCD] Watch triggered")
+		log.WithField("name", "etcd").Debug("Discovery watch triggered")
 		nodes, err := s.Fetch()
 		if err == nil {
 			callback(nodes)
@@ -102,7 +102,7 @@ func (s *ZkDiscoveryService) Watch(callback discovery.WatchCallback) {
 
 	addrs, _, eventChan, err := s.conn.ChildrenW(s.fullpath())
 	if err != nil {
-		log.Debugf("[ZK] Watch aborted")
+		log.WithField("name", "zk").Debug("Discovery watch aborted")
 		return
 	}
 	nodes, err := s.createNodes(addrs)
@@ -112,7 +112,7 @@ func (s *ZkDiscoveryService) Watch(callback discovery.WatchCallback) {
 
 	for e := range eventChan {
 		if e.Type == zk.EventNodeChildrenChanged {
-			log.Debugf("[ZK] Watch triggered")
+			log.WithField("name", "zk").Debug("Discovery watch triggered")
 			nodes, err := s.Fetch()
 			if err == nil {
 				callback(nodes)
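Every discovery backend above now tags its messages with the same `name` field ("consul", "etcd", "zk") instead of a hand-rolled prefix like `[ETCD]`. One benefit of fields over prefixes is that a structured formatter can emit them as machine-readable keys. A minimal sketch of that idea; choosing the JSON formatter here is my illustration, not part of the commit:

package main

import (
	// Assumed import path, as in the earlier sketches.
	log "github.com/sirupsen/logrus"
)

func main() {
	// With the JSON formatter every WithField key becomes a JSON key, e.g.
	// {"level":"debug","msg":"Discovery watch triggered","name":"etcd",...}
	log.SetFormatter(&log.JSONFormatter{})
	log.SetLevel(log.DebugLevel)

	log.WithField("name", "etcd").Debug("Discovery watch triggered")
	log.WithField("name", "zk").Debug("Discovery watch aborted")
}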
join.go
@@ -37,7 +37,7 @@ func join(c *cli.Context) {
 
 	hb := time.Duration(c.Int("heartbeat"))
 	for {
-		log.Infof("Registering %q on the discovery service %q every %d seconds...", addr, dflag, hb)
+		log.WithFields(log.Fields{"addr": addr, "discovery": dflag}).Infof("Registering on the discovery service every %d seconds...", hb)
 		time.Sleep(hb * time.Second)
 		if err := d.Register(addr); err != nil {
 			log.Error(err)
@@ -22,7 +22,7 @@ type logHandler struct {
 }
 
 func (h *logHandler) Handle(e *cluster.Event) error {
-	log.Printf("event -> status: %q from: %q id: %q node: %q", e.Status, e.From, e.Id, e.Node.Name)
+	log.WithFields(log.Fields{"node": e.Node.Name, "id": e.Id[:12], "from": e.From, "status": e.Status}).Debug("Event received")
 	return nil
 }
 
@@ -64,10 +64,10 @@ func manage(c *cli.Context) {
 	// If either --tls or --tlsverify are specified, load the certificates.
 	if c.Bool("tls") || c.Bool("tlsverify") {
 		if !c.IsSet("tlscert") || !c.IsSet("tlskey") {
-			log.Fatalf("--tlscert and --tlskey must be provided when using --tls")
+			log.Fatal("--tlscert and --tlskey must be provided when using --tls")
 		}
 		if c.Bool("tlsverify") && !c.IsSet("tlscacert") {
-			log.Fatalf("--tlscacert must be provided when using --tlsverify")
+			log.Fatal("--tlscacert must be provided when using --tlsverify")
 		}
 		tlsConfig, err = loadTlsConfig(
 			c.String("tlscacert"),
@@ -32,7 +32,7 @@ func New(names []string) ([]Filter, error) {
 
 	for _, name := range names {
 		if filter, exists := filters[name]; exists {
-			log.Debugf("Initializing %q filter", name)
+			log.WithField("name", name).Debug("Initializing filter")
 			selectedFilters = append(selectedFilters, filter)
 		} else {
 			return nil, ErrNotSupported
@@ -29,7 +29,7 @@ func init() {
 
 func New(name string) (PlacementStrategy, error) {
 	if strategy, exists := strategies[name]; exists {
-		log.Debugf("Initializing %q strategy", name)
+		log.WithField("name", name).Debugf("Initializing strategy")
 		err := strategy.Initialize()
 		return strategy, err
 	}