// InstallFlags adds command-line options to the top-level flag parser for
// the current process.
// Subsequent calls to `flag.Parse` will populate config with values parsed
// from the command-line.
func (config *Config) InstallFlags() {
    flag.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file")
    flag.StringVar(&config.Root, []string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the Docker runtime")
    flag.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run")
    flag.BoolVar(&config.EnableIptables, []string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules")
    flag.BoolVar(&config.EnableIpForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
    flag.StringVar(&config.BridgeIP, []string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
    flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking")
    flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication")
    flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver")
    flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver")
    flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support. SELinux does not presently support the BTRFS storage driver")
    flag.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, "Set the containers network MTU\nif no value is provided: default to the default route MTU or 1500 if no default route is available")
    opts.IPVar(&config.DefaultIp, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports")
    opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options")
    // FIXME: why the inconsistency between "hosts" and "sockets"?
    opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers")
    opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
}
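The []string names (the "#" prefix appears to mark deprecated aliases) come from Docker's vendored fork of the flag package, but the calling pattern is the familiar define-then-parse sequence. A minimal sketch of the same pattern using only the standard library flag package, with an illustrative subset of the fields:

package main

import (
    "flag"
    "fmt"
)

// Config mirrors a small, illustrative subset of the daemon configuration.
type Config struct {
    Pidfile string
    Root    string
    Mtu     int
}

// InstallFlags registers the options on the default flag set, following the
// same define-then-parse pattern as the daemon code above.
func (config *Config) InstallFlags() {
    flag.StringVar(&config.Pidfile, "pidfile", "/var/run/docker.pid", "Path to use for daemon PID file")
    flag.StringVar(&config.Root, "graph", "/var/lib/docker", "Path to use as the root of the Docker runtime")
    flag.IntVar(&config.Mtu, "mtu", 0, "Set the containers network MTU")
}

func main() {
    config := &Config{}
    config.InstallFlags()
    flag.Parse() // populate config from os.Args
    fmt.Printf("pidfile=%s graph=%s mtu=%d\n", config.Pidfile, config.Root, config.Mtu)
}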
// New initializes a new engine.
func New() *Engine {
    eng := &Engine{
        handlers: make(map[string]Handler),
        id:       utils.RandomString(),
        Stdout:   os.Stdout,
        Stderr:   os.Stderr,
        Stdin:    os.Stdin,
        Logging:  true,
    }
    eng.Register("commands", func(job *Job) Status {
        for _, name := range eng.commands() {
            job.Printf("%s\n", name)
        }
        return StatusOK
    })
    // Copy existing global handlers
    for k, v := range globalHandlers {
        eng.handlers[k] = v
    }
    return eng
}
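New only wires up the standard streams and the built-in "commands" handler; the engine becomes useful once handlers are registered and invoked as jobs. A minimal usage sketch follows; the import path and the "hello" handler are illustrative, while Register, Job, Printf, Run and StatusOK are the calls shown in this section:

package main

import (
    "os"

    "github.com/docker/docker/engine" // import path as in the Docker 1.x tree (assumption)
)

func main() {
    eng := engine.New()

    // Register a handler under a name, just like the built-in "commands" job above.
    eng.Register("hello", func(job *engine.Job) engine.Status {
        job.Printf("hello %s\n", job.Args[0])
        return engine.StatusOK
    })

    // Create the job, send its output to our stdout, and run it.
    job := eng.Job("hello", "world")
    job.Stdout.Add(os.Stdout)
    if err := job.Run(); err != nil {
        os.Exit(1)
    }
}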
// Trap sets up a simplified signal "trap", appropriate for common
// behavior expected from a vanilla unix command-line tool in general
// (and the Docker engine in particular).
//
// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated.
// * If SIGINT or SIGTERM are repeated 3 times before cleanup is complete, then cleanup is
//   skipped and the process is terminated directly.
// * If "DEBUG" is set in the environment, SIGQUIT causes an exit without cleanup.
//
func Trap(cleanup func()) {
    c := make(chan os.Signal, 1)
    signals := []os.Signal{os.Interrupt, syscall.SIGTERM}
    if os.Getenv("DEBUG") == "" {
        signals = append(signals, syscall.SIGQUIT)
    }
    gosignal.Notify(c, signals...)
    go func() {
        interruptCount := uint32(0)
        for sig := range c {
            go func(sig os.Signal) {
                log.Printf("Received signal '%v', starting shutdown of docker...\n", sig)
                switch sig {
                case os.Interrupt, syscall.SIGTERM:
                    // If the user really wants to interrupt, let him do so.
                    if atomic.LoadUint32(&interruptCount) < 3 {
                        atomic.AddUint32(&interruptCount, 1)
                        // Initiate the cleanup only once
                        if atomic.LoadUint32(&interruptCount) == 1 {
                            // Call cleanup handler
                            cleanup()
                            os.Exit(0)
                        } else {
                            return
                        }
                    } else {
                        log.Printf("Force shutdown of docker, interrupting cleanup\n")
                    }
                case syscall.SIGQUIT:
                }
                os.Exit(128 + int(sig.(syscall.Signal)))
            }(sig)
        }
    }()
}
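Here gosignal is presumably the aliased os/signal import. In the daemon the cleanup function passed to Trap is essentially the engine's Shutdown. A hypothetical standalone use; the import path is an assumption about where Trap lives in the tree:

package main

import (
    "fmt"
    "time"

    "github.com/docker/docker/pkg/signal" // assumed import path for the Trap shown above
)

func main() {
    // First SIGINT/SIGTERM runs the cleanup and exits; three signals force exit.
    signal.Trap(func() {
        fmt.Println("cleaning up...")
        time.Sleep(500 * time.Millisecond) // stand-in for real shutdown work
    })

    fmt.Println("running, press Ctrl-C to stop")
    select {} // block until a signal terminates the process
}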
// Shutdown permanently shuts down eng as follows:
// - It refuses all new jobs, permanently.
// - It waits for all active jobs to complete (with a 5 second timeout).
// - It calls all shutdown handlers concurrently (if any).
// - It returns when all handlers complete, or after 15 seconds,
//   whichever happens first.
func (eng *Engine) Shutdown() {
    eng.l.Lock()
    if eng.shutdown {
        eng.l.Unlock()
        return
    }
    eng.shutdown = true
    eng.l.Unlock()

    // We don't need to protect the rest with a lock, to allow
    // for other calls to immediately fail with "shutdown" instead
    // of hanging for 15 seconds.
    // This requires all concurrent calls to check for shutdown, otherwise
    // it might cause a race.

    // Wait for all jobs to complete.
    // Timeout after 5 seconds.
    tasksDone := make(chan struct{})
    go func() {
        eng.tasks.Wait()
        close(tasksDone)
    }()
    select {
    case <-time.After(time.Second * 5):
    case <-tasksDone:
    }

    // Call shutdown handlers, if any.
    // Timeout after 10 seconds.
    var wg sync.WaitGroup
    for _, h := range eng.onShutdown {
        wg.Add(1)
        go func(h func()) {
            defer wg.Done()
            h()
        }(h)
    }
    done := make(chan struct{})
    go func() {
        wg.Wait()
        close(done)
    }()
    select {
    case <-time.After(time.Second * 10):
    case <-done:
    }
    return
}
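Both waits inside Shutdown use the same idiom: close a channel when a WaitGroup drains, then select between that channel and a timer. A self-contained sketch of that wait-with-timeout pattern:

package main

import (
    "fmt"
    "sync"
    "time"
)

// waitTimeout waits for wg, but gives up after d. It reports whether the
// wait completed before the deadline.
func waitTimeout(wg *sync.WaitGroup, d time.Duration) bool {
    done := make(chan struct{})
    go func() {
        wg.Wait()
        close(done)
    }()
    select {
    case <-done:
        return true
    case <-time.After(d):
        return false
    }
}

func main() {
    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        time.Sleep(200 * time.Millisecond) // stand-in for a shutdown handler
    }()
    fmt.Println("completed in time:", waitTimeout(&wg, time.Second))
}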
// NewService returns a new instance of Service ready to be
// installed on an engine.
func NewService() *Service {
    return &Service{}
}

// Install installs registry capabilities to eng.
func (s *Service) Install(eng *engine.Engine) error {
    eng.Register("auth", s.Auth)
    eng.Register("search", s.Search)
    return nil
}
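Once Install has run, the registry handlers are reachable through the ordinary job mechanism. A hypothetical wiring sketch; the import paths follow the Docker 1.x tree layout, and a bare "auth" job with no credentials is expected to fail, which is why the error is checked:

package main

import (
    "log"

    "github.com/docker/docker/engine"   // assumed import paths
    "github.com/docker/docker/registry"
)

func main() {
    eng := engine.New()

    // Install registers the "auth" and "search" handlers on eng.
    srv := registry.NewService()
    if err := srv.Install(eng); err != nil {
        log.Fatal(err)
    }

    // The handlers are now reachable like any other job.
    // (A real "auth" job would carry credentials in its env.)
    if err := eng.Job("auth").Run(); err != nil {
        log.Println("auth failed:", err)
    }
}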
// load the daemon in the background so we can immediately start
// the http api so that connections don't fail while the daemon
// is booting
go func() {
    d, err := daemon.NewDaemon(daemonCfg, eng)
    if err != nil {
        log.Fatal(err)
    }
    if err := d.Install(eng); err != nil {
        log.Fatal(err)
    }
    // after the daemon is done setting up we can tell the api to start
    // accepting connections
    if err := eng.Job("acceptconnections").Run(); err != nil {
        log.Fatal(err)
    }
}()
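The trick that makes this safe is that the API listener is bound before the daemon finishes booting; the acceptconnections job merely opens the gate. A self-contained sketch of the same listen-first, serve-later idea using only the standard library (the sleep stands in for daemon initialization; the daemon's actual ServeApi/activationLock wiring is not shown here):

package main

import (
    "fmt"
    "net"
    "net/http"
    "time"
)

func main() {
    // Listen immediately so clients connecting during boot are queued by the
    // kernel instead of being refused.
    l, err := net.Listen("tcp", "127.0.0.1:8080")
    if err != nil {
        panic(err)
    }

    activation := make(chan struct{})

    // Finish expensive initialization in the background, then open the gate;
    // this mirrors the daemon closing activationLock from "acceptconnections".
    go func() {
        time.Sleep(2 * time.Second) // stand-in for daemon.NewDaemon + Install
        close(activation)
    }()

    http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintln(w, "ready")
    })

    <-activation       // hold off serving until initialization is done
    http.Serve(l, nil) // now start accepting connections
}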
// Job creates a new job which can later be executed.
// This function mimics `Command` from the standard os/exec package.
func (eng *Engine) Job(name string, args ...string) *Job {
    job := &Job{
        Eng:    eng,
        Name:   name,
        Args:   args,
        Stdin:  NewInput(),
        Stdout: NewOutput(),
        Stderr: NewOutput(),
        env:    &Env{},
    }
    if eng.Logging {
        job.Stderr.Add(utils.NopWriteCloser(eng.Stderr))
    }

    // Catchall is shadowed by specific Register.
    if handler, exists := eng.handlers[name]; exists {
        job.handler = handler
    } else if eng.catchall != nil && name != "" {
        // empty job names are illegal, catchall or not.
        job.handler = eng.catchall
    }
    return job
}
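Following the os/exec analogy, a job's output is collected by adding writers to its Output, much as exec.Cmd.Output captures a process's stdout. A hypothetical helper built only on the calls shown in this section (it needs the "bytes" import and the same assumed engine import path as the earlier sketch):

// runAndCapture is a hypothetical helper: it runs a named job on eng and
// returns whatever the handler printed to stdout, mirroring exec.Cmd.Output.
func runAndCapture(eng *engine.Engine, name string, args ...string) (string, error) {
    var out bytes.Buffer
    job := eng.Job(name, args...)
    job.Stdout.Add(&out)
    if err := job.Run(); err != nil {
        return "", err
    }
    return out.String(), nil
}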
// A job is the fundamental unit of work in the docker engine.
// Everything docker can do should eventually be exposed as a job.
// For example: execute a process in a container, create a new container,
// download an archive from the internet, serve the http api, etc.
//
// The job API is designed after unix processes: a job has a name, arguments,
// environment variables, standard streams for input, output and error, and
// an exit status which can indicate success (0) or error (anything else).
//
// One slight variation is that jobs report their status as a string. The
// string "0" indicates success, and any other string indicates an error.
// This allows for richer error reporting.
//
type Job struct {
    Eng     *Engine
    Name    string
    Args    []string
    env     *Env
    Stdout  *Output
    Stderr  *Output
    Stdin   *Input
    handler Handler
    status  Status
    end     time.Time
}

type Status int

const (
    StatusOK       Status = 0
    StatusErr      Status = 1
    StatusNotFound Status = 127
)

// Run executes the job and blocks until the job completes.
// If the job returns a failure status, an error is returned
// which includes the status.
func (job *Job) Run() error {
    if job.Eng.IsShutdown() {
        return fmt.Errorf("engine is shutdown")
    }
    // FIXME: this is a temporary workaround to avoid Engine.Shutdown
    // waiting 5 seconds for server/api.ServeApi to complete (which it never will)
    // every time the daemon is cleanly restarted.
    // The permanent fix is to implement Job.Stop and Job.OnStop so that
    // ServeApi can cooperate and terminate cleanly.
    if job.Name != "serveapi" {
        job.Eng.l.Lock()
        job.Eng.tasks.Add(1)
        job.Eng.l.Unlock()
        defer job.Eng.tasks.Done()
    }
    // FIXME: make this thread-safe
    // FIXME: implement wait
    if !job.end.IsZero() {
        return fmt.Errorf("%s: job has already completed", job.Name)
    }
    // Log beginning and end of the job
    job.Eng.Logf("+job %s", job.CallString())
    defer func() {
        job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString())
    }()
    var errorMessage = bytes.NewBuffer(nil)
    job.Stderr.Add(errorMessage)
    if job.handler == nil {
        job.Errorf("%s: command not found", job.Name)
        job.status = 127
    } else {
        job.status = job.handler(job)
        job.end = time.Now()
    }
    // Wait for all background tasks to complete
    if err := job.Stdout.Close(); err != nil {
        return err
    }
    if err := job.Stderr.Close(); err != nil {
        return err
    }
    if err := job.Stdin.Close(); err != nil {
        return err
    }
    if job.status != 0 {
        return fmt.Errorf("%s", Tail(errorMessage, 1))
    }
    return nil
}
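The interplay between the status and the stderr buffer is easiest to see from a failing handler: whatever the handler writes via Errorf is what Run surfaces as the returned error (Tail keeps only the last line). A small, hypothetical continuation of the earlier sketch, with eng set up as before:

// Hypothetical failing handler: Errorf writes to the job's stderr, and the
// non-zero status makes Run return that stderr text as an error.
eng.Register("fail", func(job *engine.Job) engine.Status {
    job.Errorf("%s: something went wrong", job.Name)
    return engine.StatusErr
})

if err := eng.Job("fail").Run(); err != nil {
    fmt.Println(err) // the handler's stderr text comes back as the error
}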
func AcceptConnections(job *engine.Job) engine.Status {
    // Tell the init daemon we are accepting requests
    go systemd.SdNotify("READY=1")

    if activationLock != nil {
        close(activationLock)
    }

    return engine.StatusOK
}
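AcceptConnections does two things: it notifies systemd that the service is ready, and it closes activationLock, presumably releasing whatever part of the API server is waiting on it. SdNotify comes from the go-systemd bindings; the underlying sd_notify protocol is just a datagram written to the socket named by $NOTIFY_SOCKET. A rough, self-contained approximation of that call (not the library's actual code):

package main

import (
    "errors"
    "net"
    "os"
)

// notifyReady approximates systemd's sd_notify(3): write the state string to
// the datagram socket systemd advertises via $NOTIFY_SOCKET.
func notifyReady(state string) error {
    socket := os.Getenv("NOTIFY_SOCKET")
    if socket == "" {
        return errors.New("not running under a systemd unit with NotifyAccess")
    }
    conn, err := net.DialUnix("unixgram", nil, &net.UnixAddr{Name: socket, Net: "unixgram"})
    if err != nil {
        return err
    }
    defer conn.Close()
    _, err = conn.Write([]byte(state))
    return err
}

func main() {
    _ = notifyReady("READY=1") // best-effort, like the daemon's go systemd.SdNotify
}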