package libpod

import (
	"context"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/ulule/deepcopier"
)

// Human-readable pod states, as reported by Status()
const (
	stopped = "Stopped"
	running = "Running"
	paused  = "Paused"
	exited  = "Exited"
	errored = "Error"
	created = "Created"
)

// Start starts all containers within a pod.
// It combines the effects of Init() and Start() on a container.
// If a container has already been initialized it will be started; otherwise
// it will be initialized, then started.
// Containers that are already running or have been paused are ignored.
// All containers are started independently, in an order dictated by their
// dependencies.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were started.
// If the map is not nil, an error was encountered when starting one or more
// containers. The container ID is mapped to the error encountered, and the
// returned error is set to ErrCtrExists.
// If both the error and the map are nil, all containers were started
// successfully.
func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	// Build a dependency graph of containers in the pod
	graph, err := buildContainerGraph(allCtrs)
	if err != nil {
		return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
	}

	ctrErrors := make(map[string]error)
	ctrsVisited := make(map[string]bool)

	// If there are no containers without dependencies, we can't start.
	// Error out.
	if len(graph.noDepNodes) == 0 {
		return nil, errors.Wrapf(ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
	}

	// Traverse the graph beginning at nodes with no dependencies
	for _, node := range graph.noDepNodes {
		startNode(ctx, node, false, ctrErrors, ctrsVisited, false)
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, errors.Wrapf(ErrCtrExists, "error starting some containers")
	}

	return nil, nil
}
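
// A hypothetical usage sketch (not part of this file's API): callers
// distinguish a total failure (nil map) from per-container failures. The
// runtime handle and pod name below are illustrative assumptions.
//
//	pod, err := runtime.LookupPod("mypod")
//	if err != nil {
//		return err
//	}
//	ctrErrs, err := pod.Start(context.Background())
//	switch {
//	case err == nil:
//		// All containers started successfully
//	case ctrErrs == nil:
//		return err // Nothing was started
//	default:
//		for id, startErr := range ctrErrs {
//			logrus.Errorf("container %s failed to start: %v", id, startErr)
//		}
//	}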

// Stop stops all containers within a pod that are not already stopped.
// Each container will use its own stop timeout.
// Only running containers will be stopped. Paused, stopped, or created
// containers will be ignored.
// If cleanup is true, mounts and network namespaces will be cleaned up after
// the container is stopped.
// All containers are stopped independently. An error stopping one container
// will not prevent other containers from being stopped.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were stopped.
// If the map is not nil, an error was encountered when stopping one or more
// containers. The container ID is mapped to the error encountered, and the
// returned error is set to ErrCtrExists.
// If both the error and the map are nil, all containers were stopped without
// error.
func (p *Pod) Stop(cleanup bool) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	ctrErrors := make(map[string]error)

	// TODO: There may be cases where it makes sense to order stops based on
	// dependencies. Should we bother with this?

	// Stop all containers
	for _, ctr := range allCtrs {
		ctr.lock.Lock()

		if err := ctr.syncContainer(); err != nil {
			ctr.lock.Unlock()
			ctrErrors[ctr.ID()] = err
			continue
		}

		// Ignore containers that are not running
		if ctr.state.State != ContainerStateRunning {
			ctr.lock.Unlock()
			continue
		}

		if err := ctr.stop(ctr.config.StopTimeout); err != nil {
			ctr.lock.Unlock()
			ctrErrors[ctr.ID()] = err
			continue
		}

		if cleanup {
			if err := ctr.cleanup(); err != nil {
				ctrErrors[ctr.ID()] = err
			}
		}

		ctr.lock.Unlock()
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, errors.Wrapf(ErrCtrExists, "error stopping some containers")
	}

	return nil, nil
}
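
// A hypothetical usage sketch: stopping a pod and cleaning up mounts and
// network namespaces in the same call. The pod variable is assumed to come
// from a lookup as in the Start example above.
//
//	ctrErrs, err := pod.Stop(true) // cleanup == true
//	if err != nil && ctrErrs == nil {
//		return err // Nothing was stopped
//	}
//	for id, stopErr := range ctrErrs {
//		logrus.Errorf("container %s failed to stop: %v", id, stopErr)
//	}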

// Pause pauses all containers within a pod that are running.
// Only running containers will be paused. Paused, stopped, or created
// containers will be ignored.
// All containers are paused independently. An error pausing one container
// will not prevent other containers from being paused.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were paused.
// If the map is not nil, an error was encountered when pausing one or more
// containers. The container ID is mapped to the error encountered, and the
// returned error is set to ErrCtrExists.
// If both the error and the map are nil, all containers were paused without
// error.
func (p *Pod) Pause() (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	ctrErrors := make(map[string]error)

	// Pause all containers
	for _, ctr := range allCtrs {
		ctr.lock.Lock()

		if err := ctr.syncContainer(); err != nil {
			ctr.lock.Unlock()
			ctrErrors[ctr.ID()] = err
			continue
		}

		// Ignore containers that are not running
		if ctr.state.State != ContainerStateRunning {
			ctr.lock.Unlock()
			continue
		}

		if err := ctr.pause(); err != nil {
			ctr.lock.Unlock()
			ctrErrors[ctr.ID()] = err
			continue
		}

		ctr.lock.Unlock()
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, errors.Wrapf(ErrCtrExists, "error pausing some containers")
	}

	return nil, nil
}

// Unpause unpauses all containers within a pod that are paused.
// Only paused containers will be unpaused. Running, stopped, or created
// containers will be ignored.
// All containers are unpaused independently. An error unpausing one container
// will not prevent other containers from being unpaused.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were unpaused.
// If the map is not nil, an error was encountered when unpausing one or more
// containers. The container ID is mapped to the error encountered, and the
// returned error is set to ErrCtrExists.
// If both the error and the map are nil, all containers were unpaused without
// error.
func (p *Pod) Unpause() (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	ctrErrors := make(map[string]error)

	// Unpause all containers
	for _, ctr := range allCtrs {
		ctr.lock.Lock()

		if err := ctr.syncContainer(); err != nil {
			ctr.lock.Unlock()
			ctrErrors[ctr.ID()] = err
			continue
		}

		// Ignore containers that are not paused
		if ctr.state.State != ContainerStatePaused {
			ctr.lock.Unlock()
			continue
		}

		if err := ctr.unpause(); err != nil {
			ctr.lock.Unlock()
			ctrErrors[ctr.ID()] = err
			continue
		}

		ctr.lock.Unlock()
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, errors.Wrapf(ErrCtrExists, "error unpausing some containers")
	}

	return nil, nil
}
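
// A hypothetical round-trip sketch: Pause and Unpause are symmetric, so a
// paused pod can be resumed with the same error-map handling. The pod
// variable is an illustrative assumption.
//
//	if ctrErrs, err := pod.Pause(); err != nil {
//		logrus.Errorf("pausing pod %s: %v (%d container errors)", pod.ID(), err, len(ctrErrs))
//	}
//	if ctrErrs, err := pod.Unpause(); err != nil {
//		logrus.Errorf("unpausing pod %s: %v (%d container errors)", pod.ID(), err, len(ctrErrs))
//	}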

// Restart restarts all containers within a pod that are not paused or in an
// error state.
// It combines the effects of Stop() and Start() on a container.
// Each container will use its own stop timeout.
// All containers are restarted independently, in an order dictated by their
// dependencies. An error restarting one container will not prevent other
// containers from being restarted.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were restarted.
// If the map is not nil, an error was encountered when restarting one or more
// containers. The container ID is mapped to the error encountered, and the
// returned error is set to ErrCtrExists.
// If both the error and the map are nil, all containers were restarted without
// error.
func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	// Build a dependency graph of containers in the pod
	graph, err := buildContainerGraph(allCtrs)
	if err != nil {
		return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
	}

	ctrErrors := make(map[string]error)
	ctrsVisited := make(map[string]bool)

	// If there are no containers without dependencies, we can't start.
	// Error out.
	if len(graph.noDepNodes) == 0 {
		return nil, errors.Wrapf(ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
	}

	// Traverse the graph beginning at nodes with no dependencies
	for _, node := range graph.noDepNodes {
		startNode(ctx, node, false, ctrErrors, ctrsVisited, true)
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, errors.Wrapf(ErrCtrExists, "error restarting some containers")
	}

	return nil, nil
}

// Kill sends a signal to all running containers within a pod.
// Signals will only be sent to running containers. Containers that are not
// running will be ignored. All signals are sent independently, and sending
// will continue even if some containers encounter errors.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were signalled.
// If the map is not nil, an error was encountered when signalling one or more
// containers. The container ID is mapped to the error encountered, and the
// returned error is set to ErrCtrExists.
// If both the error and the map are nil, all containers were signalled
// successfully.
func (p *Pod) Kill(signal uint) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	ctrErrors := make(map[string]error)

	// Send a signal to all containers
	for _, ctr := range allCtrs {
		ctr.lock.Lock()

		if err := ctr.syncContainer(); err != nil {
			ctr.lock.Unlock()
			ctrErrors[ctr.ID()] = err
			continue
		}

		// Ignore containers that are not running
		if ctr.state.State != ContainerStateRunning {
			ctr.lock.Unlock()
			continue
		}

		if err := ctr.runtime.ociRuntime.killContainer(ctr, signal); err != nil {
			ctr.lock.Unlock()
			ctrErrors[ctr.ID()] = err
			continue
		}

		logrus.Debugf("Killed container %s with signal %d", ctr.ID(), signal)

		ctr.lock.Unlock()
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, errors.Wrapf(ErrCtrExists, "error killing some containers")
	}

	return nil, nil
}
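
// A hypothetical usage sketch: Kill takes a numeric signal, so callers
// typically convert from a syscall constant (which requires importing
// "syscall"). The pod variable is an illustrative assumption.
//
//	ctrErrs, err := pod.Kill(uint(syscall.SIGTERM))
//	if err != nil && ctrErrs == nil {
//		return err // Nothing was signalled
//	}
//	for id, killErr := range ctrErrs {
//		logrus.Errorf("container %s could not be signalled: %v", id, killErr)
//	}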

// ContainerStatus gets the status of all containers in the pod.
// Returns a map of container ID to container status.
func (p *Pod) ContainerStatus() (map[string]ContainerStatus, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	// We need to lock all the containers
	for _, ctr := range allCtrs {
		ctr.lock.Lock()
		defer ctr.lock.Unlock()
	}

	// Now that all containers are locked, get their status
	status := make(map[string]ContainerStatus, len(allCtrs))
	for _, ctr := range allCtrs {
		if err := ctr.syncContainer(); err != nil {
			return nil, err
		}

		status[ctr.ID()] = ctr.state.State
	}

	return status, nil
}
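
// A hypothetical usage sketch: the returned map is keyed by full container ID.
// The pod variable and the use of fmt are illustrative assumptions.
//
//	statuses, err := pod.ContainerStatus()
//	if err != nil {
//		return err
//	}
//	for id, state := range statuses {
//		fmt.Printf("%s: %s\n", id, state.String())
//	}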

// Status determines the status of the pod based on the
// statuses of the containers in the pod.
// Returns a string representation of the pod status.
func (p *Pod) Status() (string, error) {
	ctrStatuses, err := p.ContainerStatus()
	if err != nil {
		return errored, err
	}
	ctrNum := len(ctrStatuses)
	if ctrNum == 0 {
		return created, nil
	}
	statuses := map[string]int{
		stopped: 0,
		running: 0,
		paused:  0,
		created: 0,
		errored: 0,
	}
	for _, ctrStatus := range ctrStatuses {
		switch ctrStatus {
		case ContainerStateStopped:
			statuses[stopped]++
		case ContainerStateRunning:
			statuses[running]++
		case ContainerStatePaused:
			statuses[paused]++
		case ContainerStateCreated, ContainerStateConfigured:
			statuses[created]++
		default:
			statuses[errored]++
		}
	}

	if statuses[running] > 0 {
		return running, nil
	} else if statuses[paused] == ctrNum {
		return paused, nil
	} else if statuses[stopped] == ctrNum {
		return exited, nil
	} else if statuses[stopped] > 0 {
		return stopped, nil
	} else if statuses[errored] > 0 {
		return errored, nil
	}
	return created, nil
}
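
// A worked example of the precedence rules above, with hypothetical inputs:
// a pod holding one Running, one Paused, and one Stopped container reports
// "Running", since any running container dominates. With the running
// container stopped (one Paused, two Stopped), it reports "Stopped": some,
// but not all, containers are stopped, so the pod is not yet "Exited". Only
// when all three containers are stopped does it report "Exited".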

// Inspect returns a PodInspect struct to describe the pod
func (p *Pod) Inspect() (*PodInspect, error) {
	var podContainers []PodContainerInfo

	p.lock.Lock()
	defer p.lock.Unlock()
	if err := p.updatePod(); err != nil {
		return nil, err
	}

	containers, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return &PodInspect{}, err
	}
	for _, c := range containers {
		containerStatus := "unknown"
		// Ignoring possible errors here because we don't want this to be
		// catastrophic in nature
		containerState, err := c.State()
		if err == nil {
			containerStatus = containerState.String()
		}
		pc := PodContainerInfo{
			ID:    c.ID(),
			State: containerStatus,
		}
		podContainers = append(podContainers, pc)
	}

	config := new(PodConfig)
	deepcopier.Copy(p.config).To(config)
	inspectData := PodInspect{
		Config: config,
		State: &PodInspectState{
			CgroupPath: p.state.CgroupPath,
		},
		Containers: podContainers,
	}
	return &inspectData, nil
}
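
// A hypothetical usage sketch: printing the inspect data. The pod variable
// and the use of fmt are illustrative assumptions; the fields come from the
// PodInspect struct populated above.
//
//	data, err := pod.Inspect()
//	if err != nil {
//		return err
//	}
//	fmt.Printf("cgroup path: %s\n", data.State.CgroupPath)
//	for _, ctr := range data.Containers {
//		fmt.Printf("  %s: %s\n", ctr.ID, ctr.State)
//	}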