Clear Containers for Docker Engine execution driver

Signed-off-by: Dimitri John Ledkov <dimitri.j.ledkov@intel.com>
Signed-off-by: James Hunt <james.o.hunt@intel.com>
Signed-off-by: Michael Doherty <michael.i.doherty@intel.com>
This commit is contained in:
Dimitri John Ledkov
2015-06-09 11:05:16 +01:00
parent 6bf1c41b77
commit e45d0eb532
10 changed files with 960 additions and 32 deletions

View File

@@ -11,7 +11,7 @@ import (
var (
defaultPidFile = "/var/run/docker.pid"
defaultGraph = "/var/lib/docker"
defaultExec = "native"
defaultExec = "clr"
)
// Config defines the configuration of a docker daemon.

View File

@@ -0,0 +1,717 @@
// +build linux
package clr
import (
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
"os"
"os/exec"
"path"
"strconv"
"strings"
"sync"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/pkg/mount"
sysinfo "github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/term"
"github.com/docker/libnetwork/netlabel"
"github.com/kr/pty"
"github.com/opencontainers/runc/libcontainer/configs"
)
const (
// driverName identifies this execution driver: "clr",
// Clear Linux for Intel(R) Architecture.
driverName = "clr"
// envVarPrefix prefixes env vars handed to the in-VM init (e.g. CLR_USER).
envVarPrefix = "CLR_"
// Command used for lkvm control
lkvmName = "lkvm"
// local "latest" information file cached under libPath
clrFile = "latest"
// upstream base URL for Clear Linux releases
clrURL = "https://download.clearlinux.org"
// upstream latest release file
latestFile = "https://download.clearlinux.org/latest"
// clr kernel (not bzimage) booted by lkvm
clrKernel = "/usr/lib/kernel/vmlinux.container"
)
// driver is the Clear Linux implementation of execdriver.Driver.
// Each container is run as a lightweight KVM virtual machine via lkvm.
type driver struct {
root string // root path for the driver to use
libPath string // cache directory for Clear Linux images and per-container files
initPath string // dockerinit path; not referenced elsewhere in this driver as visible here
version string // Clear Linux release currently installed (from the "latest" file)
apparmor bool // whether AppArmor support was requested at construction
sharedRoot bool
activeContainers map[string]*activeContainer // started containers; guarded by the embedded Mutex
machineMemory int64 // host MemTotal from sysinfo.ReadMemInfo, reported via Stats
containerPid int // pid of the most recently started lkvm process
sync.Mutex
}
// activeContainer tracks a started container: its libcontainer config and
// the exec.Cmd running the lkvm process.
type activeContainer struct {
container *configs.Config
cmd *exec.Cmd
}
// getTapIf derives the tap network interface name for a container from
// the first 12 characters of its ID, e.g. "tb-0123456789ab".
func getTapIf(c *execdriver.Command) string {
	return "tb-" + c.ID[:12]
}
// getClrVersion reads the locally cached "latest" file under libPath and
// returns its first line (the Clear Linux release number). An empty
// string is returned when the file cannot be read.
func getClrVersion(libPath string) string {
	raw, err := ioutil.ReadFile(path.Join(libPath, clrFile))
	if err != nil {
		return ""
	}
	lines := strings.SplitN(string(raw), "\n", 2)
	return lines[0]
}
func fetchLatest(libPath string) error {
out, err := os.Create(path.Join(libPath, clrFile))
if err != nil {
return err
}
defer out.Close()
resp, err := http.Get(latestFile)
if err != nil {
return err
}
defer resp.Body.Close()
_, err = io.Copy(out, resp.Body)
return err
}
// fetchImage downloads the xz-compressed Clear Linux container image for
// the given release into libPath and decompresses it in place with unxz.
func fetchImage(version, libPath string) error {
	// TODO: Add checksum validation
	outfile := fmt.Sprintf("clear-%s-containers.img.xz", version)
	url := fmt.Sprintf("%s/releases/%s/clear/%s", clrURL, version, outfile)
	outpath := path.Join(libPath, outfile)
	logrus.Debugf("Fetching clr version: %s, %s", version, outpath)
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// BUG FIX: a non-200 response (e.g. a 404 page for an unknown
	// release) used to be written out as if it were the image.
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("fetching %s: unexpected HTTP status %s", url, resp.Status)
	}
	out, err := os.Create(outpath)
	if err != nil {
		return err
	}
	defer out.Close()
	// Consider progress feedback ?
	if _, err = io.Copy(out, resp.Body); err != nil {
		return err
	}
	// decompress the file in place (produces outpath minus ".xz")
	cmd := exec.Command("unxz", outpath)
	cmd.Dir = libPath
	if output, err := cmd.CombinedOutput(); err != nil {
		logrus.Debugf("Unable to extract image %s: %s", version, output)
		return err
	}
	return nil
}
// NewDriver creates a new clear linux execution driver.
//
// root is the driver's private state directory (created 0700 if missing),
// libPath is where Clear Linux images are cached and initPath points at
// the dockerinit binary. The host's total memory is read up front so
// Stats can report it, and the Clear Linux container image is
// downloaded or updated via prepareClr — so construction may touch the
// network and can fail for I/O reasons.
func NewDriver(root, libPath, initPath string, apparmor bool) (*driver, error) {
if err := os.MkdirAll(root, 0700); err != nil {
return nil, err
}
meminfo, err := sysinfo.ReadMemInfo()
if err != nil {
return nil, err
}
version, err := prepareClr(libPath)
if err != nil {
return nil, err
}
return &driver{
apparmor: apparmor,
root: root,
libPath: libPath,
initPath: initPath,
version: version,
sharedRoot: false,
activeContainers: make(map[string]*activeContainer),
// FIXME:
machineMemory: meminfo.MemTotal,
}, nil
}
// prepareClr makes sure a Clear Linux container image is available under
// libPath, downloading the image when none is installed or when upstream
// has a newer release. It returns the version now in use.
//
// BUG FIX: the error returned by fetchImage was previously assigned to a
// local and then discarded — the function always returned nil, so a
// failed download left the driver claiming a version it never fetched.
func prepareClr(libPath string) (string, error) {
	version := getClrVersion(libPath)
	logrus.Debugf("%s preparing environment", driverName)
	if err := fetchLatest(libPath); err != nil {
		return "", err
	}
	nversion := getClrVersion(libPath)
	switch {
	case version == "":
		logrus.Debugf("Installing clr version: %s", nversion)
		if err := fetchImage(nversion, libPath); err != nil {
			return "", err
		}
	case nversion != version:
		logrus.Debugf("Updating to clr version: %s", nversion)
		if err := fetchImage(nversion, libPath); err != nil {
			return "", err
		}
	default:
		logrus.Debugf("Using clr version: %s", nversion)
	}
	return nversion, nil
}
// Name returns the driver name combined with the Clear Linux image
// version in use, e.g. "clr-3050".
func (d *driver) Name() string {
	return driverName + "-" + d.version
}
// Run starts the container workload inside a lightweight KVM virtual
// machine driven by lkvm and blocks until the VM exits.
//
// High-level sequence:
//  1. build the container config and compute the VM memory size
//  2. inject CLR_WORKINGDIR / CLR_USER into the process environment
//  3. rewire networking onto a tap device (setupNetwork)
//  4. set up the console (pty when a TTY is requested, pipes otherwise)
//  5. write config.env and the /.containerexec bootstrap script
//  6. bind-mount requested volumes into the rootfs (exported via 9p)
//  7. exec lkvm with the Clear Linux kernel/image and wait for exit
//
// Returns the workload's exit status; -1 with a non-nil error when any
// setup step fails.
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
var (
term execdriver.Terminal
err error
)
container, err := d.createContainer(c)
if err != nil {
return execdriver.ExitStatus{ExitCode: -1}, err
}
// VM memory size in MiB; default to 1024 when no limit was requested.
memoryMiB := c.HostConfig.Memory
if memoryMiB == 0 {
memoryMiB = 1024
} else {
// docker passes the value as bytes
memoryMiB = memoryMiB / int64(math.Pow(2, 20))
}
// Hand working directory and user to the in-VM init via CLR_* env vars.
workingDirVar := fmt.Sprintf("%s%s=%q", envVarPrefix, "WORKINGDIR", c.WorkingDir)
c.ProcessConfig.Cmd.Env = append(c.ProcessConfig.Cmd.Env, workingDirVar)
userVar := fmt.Sprintf("%s%s=%q", envVarPrefix, "USER", c.ProcessConfig.User)
c.ProcessConfig.Cmd.Env = append(c.ProcessConfig.Cmd.Env, userVar)
if err := d.setupNetwork(c); err != nil {
return execdriver.ExitStatus{ExitCode: -1}, err
}
if c.ProcessConfig.Tty {
term, err = NewTtyConsole(&c.ProcessConfig, pipes)
} else {
term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
}
if err != nil {
return execdriver.ExitStatus{ExitCode: -1}, err
}
c.ProcessConfig.Terminal = term
// Register the container; the map is shared with Stats, so lock.
d.Lock()
d.activeContainers[c.ID] = &activeContainer{
container: container,
cmd: &c.ProcessConfig.Cmd,
}
d.Unlock()
if err := d.generateEnvConfig(c); err != nil {
return execdriver.ExitStatus{ExitCode: -1}, err
}
if err := d.generateDockerInit(c); err != nil {
return execdriver.ExitStatus{ExitCode: -1}, err
}
// Bind-mount volumes into the rootfs so the 9p export sees them.
for _, m := range c.Mounts {
dest := path.Join(c.Rootfs, m.Destination)
// /etc/hostname is managed inside the VM image.
if m.Destination == "/etc/hostname" {
continue
}
if !pathExists(m.Source) {
continue
}
opts := "bind"
if m.Private {
opts = opts + ",rprivate"
}
if m.Slave {
opts = opts + ",rslave"
}
// This may look racy, but it isn't since the VM isn't
// running yet.
//
// The check is necessary to handle bind mounting of
// regular files correctly since without it we may be
// attempting to create a directory where there already
// exists a normal file.
if !pathExists(dest) {
if err := os.MkdirAll(dest, 0750); err != nil {
return execdriver.ExitStatus{ExitCode: -1}, err
}
}
if err := mount.Mount(m.Source, dest, "", opts); err != nil {
return execdriver.ExitStatus{ExitCode: -1}, err
}
if !m.Writable {
// Apply the read-only flag with a bind remount.
if err := mount.Mount("", dest, "", "bind,remount,ro"); err != nil {
return execdriver.ExitStatus{ExitCode: -1}, err
}
}
// NOTE: defer in a loop — every unmount runs when Run returns
// (i.e. after the VM has exited), which is the intent here.
defer mount.Unmount(dest)
}
var args []string
// various things for lkvm
ifname := getTapIf(c)
// FIXME: Should be real hostname from like process/container struct
hostname := c.ID[0:12]
img := fmt.Sprintf("%s/clear-%s-containers.img", d.libPath, d.version)
memory := fmt.Sprintf("%d", memoryMiB)
// FIXME: Locked cores to 6 ?
cores := fmt.Sprintf("%d", 6)
ipaddr := c.NetworkSettings.IPAddress
gateway := c.NetworkSettings.Gateway
macaddr := c.NetworkSettings.MacAddress
args = append(args, c.ProcessConfig.Entrypoint)
args = append(args, c.ProcessConfig.Arguments...)
// Kernel command line: root on the shmem-backed image, systemd boots
// straight into container.target, static IP configuration via ip=.
rootParams := fmt.Sprintf("root=/dev/plkvm0p1 rootfstype=ext4 rootflags=dax,data=ordered "+
"init=/usr/lib/systemd/systemd systemd.unit=container.target rw tsc=reliable "+
"systemd.show_status=false "+
"no_timer_check rcupdate.rcu_expedited=1 console=hvc0 quiet ip=%s::%s::%s::off",
ipaddr, gateway, hostname)
params := []string{
lkvmName, "run", "-c", cores, "-m", memory,
"--name", c.ID, "--console", "virtio",
"--kernel", clrKernel,
"--params", rootParams,
"--shmem", fmt.Sprintf("0x200000000:0:file=%s:private", img),
"--network", fmt.Sprintf("mode=tap,script=none,tapif=%s,guest_mac=%s", ifname, macaddr),
"--9p", fmt.Sprintf("%s,rootfs", c.Rootfs),
}
logrus.Debugf("%s params %s", driverName, params)
var (
name = params[0]
arg = params[1:]
)
aname, err := exec.LookPath(name)
if err != nil {
// Fall back to the bare name and let exec resolve it.
aname = name
}
c.ProcessConfig.Path = aname
c.ProcessConfig.Args = append([]string{name}, arg...)
// lkvm keeps its control sockets under $HOME/.lkvm.
c.ProcessConfig.Env = []string{fmt.Sprintf("HOME=%s", d.root)}
// Start the container. Since it runs synchronously, we don't Wait()
// for it since we need to check the status to determine if it did
// actually start successfully.
if err := c.ProcessConfig.Start(); err != nil {
return execdriver.ExitStatus{ExitCode: -1}, err
}
var (
waitErr error
waitLock = make(chan struct{})
)
go func() {
if err := c.ProcessConfig.Wait(); err != nil {
if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0
waitErr = err
}
}
close(waitLock)
}()
// FIXME: need to create state.json for Stats() to work.
c.ContainerPid = c.ProcessConfig.Process.Pid
d.containerPid = c.ProcessConfig.Process.Pid
if startCallback != nil {
logrus.Debugf("Invoking startCallback")
startCallback(&c.ProcessConfig, c.ProcessConfig.Process.Pid)
}
// FIXME:
oomKill := false
// Wait for the VM to shutdown
<-waitLock
exitCode := getExitCode(c)
// Tear down the tap device and remove the stale control socket.
cExitStatus, cerr := d.cleanupVM(c)
if cerr != nil {
waitErr = cerr
exitCode = cExitStatus
}
// check oom error
if oomKill {
exitCode = 137
}
return execdriver.ExitStatus{ExitCode: exitCode, OOMKilled: false}, waitErr
}
// pathExists reports whether a stat of path succeeds. A stat failure for
// any reason (including permission errors) is treated as non-existence.
func pathExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}
// pathExecutable reports whether path exists and has at least one execute
// permission bit (owner, group, or other) set in its file mode.
func pathExecutable(path string) bool {
	info, err := os.Stat(path)
	if err != nil {
		return false
	}
	return info.Mode()&0111 != 0
}
// cleanupVM removes host-side resources left behind by a VM: the tap
// network interface and the lkvm control socket. When "ip tuntap del"
// fails, its numeric exit status and error are propagated so Run can
// surface them.
func (d *driver) cleanupVM(c *execdriver.Command) (exitStatus int, err error) {
cmd := exec.Command("ip", "tuntap", "del", "dev", getTapIf(c), "mode", "tap")
var output []byte
if output, err = cmd.CombinedOutput(); err != nil {
// Recover the command's numeric exit status when available.
if exitError, ok := err.(*exec.ExitError); ok {
waitStatus := exitError.Sys().(syscall.WaitStatus)
exitStatus = waitStatus.ExitStatus()
}
logrus.Debugf("teardown failed for vm %s: %s (%s)", c.ID, string(output), err.Error())
}
// doesn't matter if this fails
// lkvm could have removed it, and stale sockets are not fatal
_ = os.Remove(fmt.Sprintf("%s/.lkvm/%s.sock", d.root, c.ID))
return exitStatus, err
}
// createContainer populates and configures the container type with the
// data provided by the execdriver.Command. Currently a thin wrapper
// around execdriver.InitContainer with no driver-specific additions.
func (d *driver) createContainer(c *execdriver.Command) (*configs.Config, error) {
return execdriver.InitContainer(c), nil
}
// getExitCode returns the exit code of the container process.
// If the process has not exited yet, -1 is returned.
func getExitCode(c *execdriver.Command) int {
if c.ProcessConfig.ProcessState == nil {
return -1
}
return c.ProcessConfig.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
}
// lkvmCommand runs an lkvm sub-command (e.g. "pause", "resume", "list")
// and returns its standard output. When c is non-nil the command is
// scoped to that container with "--name <id>".
//
// BUG FIX: the binary name was previously prepended to the argument list
// as well as being the command path, so lkvm received its own name as
// the first argument (argv: "lkvm lkvm pause ..."). The argument list
// now starts with the sub-command.
func (d *driver) lkvmCommand(c *execdriver.Command, arg string) ([]byte, error) {
	args := []string{arg}
	if c != nil {
		args = append(args, "--name", c.ID)
	}
	cmd := exec.Command(lkvmName, args...)
	// lkvm keeps its control sockets under $HOME/.lkvm.
	cmd.Env = []string{fmt.Sprintf("HOME=%s", d.root)}
	return cmd.Output()
}
// Kill sends a signal to the workload. Signal delivery into the VM is
// not implemented by this driver, so the request is silently ignored and
// nil is returned (sig is unused).
func (d *driver) Kill(c *execdriver.Command, sig int) error {
// Not supported
return nil
}
// Pause suspends the container's VM via "lkvm pause".
func (d *driver) Pause(c *execdriver.Command) error {
_, err := d.lkvmCommand(c, "pause")
return err
}
// Unpause resumes a previously paused VM via "lkvm resume".
func (d *driver) Unpause(c *execdriver.Command) error {
_, err := d.lkvmCommand(c, "resume")
return err
}
// Terminate forcibly stops a container via "lkvm stop".
func (d *driver) Terminate(c *execdriver.Command) error {
_, err := d.lkvmCommand(c, "stop")
return err
}
// containerDir returns the per-container state directory under libPath.
func (d *driver) containerDir(containerID string) string {
return path.Join(d.libPath, "containers", containerID)
}
// isDigit reports whether s parses as a (possibly signed) base-10
// integer via strconv.Atoi.
func isDigit(s string) bool {
	_, err := strconv.Atoi(s)
	return err == nil
}
// getInfo looks up the container id in the output of "lkvm list" and
// returns the matching line. When the container is not listed, a
// synthesized "-1 <id> stopped" line is returned so callers always get
// the same "<pid> <name> <state>" format to parse.
func (d *driver) getInfo(id string) ([]byte, error) {
	output, err := d.lkvmCommand(nil, "list")
	if err != nil {
		return nil, err
	}
	for i, line := range strings.Split(string(output), "\n") {
		// The first two lines are a column header and a separator.
		if i < 2 {
			continue
		}
		fields := strings.Fields(strings.TrimSpace(line))
		// Expect exactly "<pid> <name> <state>" with a numeric pid.
		if len(fields) != 3 || !isDigit(fields[0]) || fields[1] != id {
			continue
		}
		return []byte(line), nil
	}
	return []byte(fmt.Sprintf("-1 %s stopped", id)), nil
}
// info implements execdriver.Info for one container of this driver.
type info struct {
ID string // container ID being queried
driver *driver // owning driver, used to run lkvm queries
}
// IsRunning reports whether the container shows up as "running" in the
// lkvm process list. Lookup errors are logged and, like parse errors,
// reported as "not running".
func (i *info) IsRunning() bool {
output, err := i.driver.getInfo(i.ID)
if err != nil {
logrus.Errorf("Error getting info for %s container %s: %s (%s)",
driverName, i.ID, err, output)
return false
}
clrInfo, err := parseClrInfo(i.ID, string(output))
if err != nil {
return false
}
return clrInfo.Running
}
// Info returns a queryable status handle for the given container ID.
func (d *driver) Info(id string) execdriver.Info {
return &info{
ID: id,
driver: d,
}
}
// GetPidsForContainer returns the host pids associated with a container.
func (d *driver) GetPidsForContainer(id string) ([]int, error) {
// The VM doesn't expose the workload pid(s), so the only meaningful
// pid is that of the VM
return []int{d.containerPid}, nil
}
// TtyConsole is a type to represent a pseudo-terminal (see pty(7)).
type TtyConsole struct {
MasterPty *os.File // host side; relayed to/from the docker pipes
SlavePty *os.File // container side; wired to the process stdio
}
// NewTtyConsole returns a new TtyConsole object.
//
// Both ends of the pty are opened up front: the slave end is wired to
// the command's stdio and recorded as the process console, while the
// master end is relayed to the docker pipes by AttachPipes. On attach
// failure the pty pair is closed again before returning the error.
func NewTtyConsole(processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes) (*TtyConsole, error) {
ptyMaster, ptySlave, err := pty.Open()
if err != nil {
return nil, err
}
tty := &TtyConsole{
MasterPty: ptyMaster,
SlavePty: ptySlave,
}
if err := tty.AttachPipes(&processConfig.Cmd, pipes); err != nil {
tty.Close()
return nil, err
}
processConfig.Console = tty.SlavePty.Name()
return tty, nil
}
// Master returns the master end of the pty.
func (t *TtyConsole) Master() *os.File {
return t.MasterPty
}
// Resize modifies the window size of the pty terminal to h rows by w
// columns.
func (t *TtyConsole) Resize(h, w int) error {
return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
}
// AttachPipes associates the specified pipes with the pty: the command's
// stdout/stderr (and stdin when present) are attached to the slave end,
// while goroutines relay data between the master end and the docker
// pipes.
func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) error {
command.Stdout = t.SlavePty
command.Stderr = t.SlavePty
go func() {
// If the output pipe supports it, close downstream writers once
// the master end stops producing data.
if wb, ok := pipes.Stdout.(interface {
CloseWriters() error
}); ok {
defer wb.CloseWriters()
}
io.Copy(pipes.Stdout, t.MasterPty)
}()
if pipes.Stdin != nil {
command.Stdin = t.SlavePty
// NOTE(review): assumes command.SysProcAttr was already allocated
// by the caller; a nil SysProcAttr would panic here — confirm.
command.SysProcAttr.Setctty = true
go func() {
io.Copy(t.MasterPty, pipes.Stdin)
pipes.Stdin.Close()
}()
}
return nil
}
// Close closes both ends of the pty, slave end first; the error from
// closing the master end is returned.
func (t *TtyConsole) Close() error {
t.SlavePty.Close()
return t.MasterPty.Close()
}
// Exec runs an additional process inside a running container. This is
// not supported by the clr driver and always returns an error.
func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
return -1, fmt.Errorf("Unsupported: Exec is not supported by the %q driver", driverName)
}
// Clean removes per-container state left behind by Exec. Since Exec is
// unsupported there is nothing to clean up, so this is a no-op.
func (d *driver) Clean(id string) error {
return nil
}
// generateEnvConfig writes the container's environment variables to
// <libPath>/containers/<id>/config.env and registers a read-only,
// private bind mount of that file at /.dockerenv inside the container.
//
// BUG FIX: the per-container directory was never created before the
// write; ioutil.WriteFile does not create parent directories, so the
// write failed whenever the directory did not already exist.
func (d *driver) generateEnvConfig(c *execdriver.Command) error {
	data := []byte(strings.Join(c.ProcessConfig.Env, "\n"))
	dir := path.Join(d.libPath, "containers", c.ID)
	if err := os.MkdirAll(dir, 0700); err != nil {
		return err
	}
	p := path.Join(dir, "config.env")
	c.Mounts = append(c.Mounts, execdriver.Mount{
		Source:      p,
		Destination: "/.dockerenv",
		Writable:    false,
		Private:     true,
	})
	return ioutil.WriteFile(p, data, 0600)
}
// generateDockerInit writes a /.containerexec shell script into the
// container rootfs that runs the configured entrypoint and arguments.
// An already existing executable script is left untouched.
func (d *driver) generateDockerInit(c *execdriver.Command) error {
	p := fmt.Sprintf("%s/.containerexec", c.Rootfs)
	if pathExecutable(p) {
		return nil
	}
	cmdline := append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...)
	script := fmt.Sprintf("#!/bin/sh\n%s\n", strings.Join(cmdline, " "))
	return ioutil.WriteFile(p, []byte(script), 0755)
}
// setupNetwork replaces the veth link created by libnetwork with a tap
// device enslaved to the docker bridge so the VM can attach to it.
//
// Steps (all via the iproute2 "ip" tool):
//  1. delete the host-side veth link created by the bridge driver
//  2. create a vnet_hdr tap interface named after the container
//  3. enslave the tap to the docker bridge
//  4. bring the tap up
//
// BUG FIX: missing endpoint information used to fall through and pass
// empty interface names straight to "ip" (and the bare type assertions
// could panic); it now fails early with a clear error.
func (d *driver) setupNetwork(c *execdriver.Command) error {
	ifname := getTapIf(c)
	var bridgeName, bridgeLinkName string
	if len(c.EndpointInfo) == 1 {
		e := c.EndpointInfo[0]
		bridgeName, _ = e[netlabel.BridgeName].(string)
		bridgeLinkName, _ = e[netlabel.BridgeLinkName].(string)
	}
	if bridgeName == "" || bridgeLinkName == "" {
		return fmt.Errorf("%s: no usable endpoint information for container %s", driverName, c.ID)
	}
	steps := [][]string{
		// Strip the existing veth.
		{"ip", "link", "del", bridgeLinkName},
		{"ip", "tuntap", "add", "dev", ifname, "mode", "tap", "vnet_hdr"},
		{"ip", "link", "set", "dev", ifname, "master", bridgeName},
		{"ip", "link", "set", "dev", ifname, "up"},
	}
	for _, s := range steps {
		if output, err := exec.Command(s[0], s[1:]...).CombinedOutput(); err != nil {
			logrus.Debugf("%s setupNetwork error: %s", driverName, output)
			return err
		}
	}
	return nil
}
// Stats returns resource usage statistics for the container with the
// given id, which must have been started via Run (i.e. be present in
// activeContainers).
//
// BUG FIX: the activeContainers map is written by Run under the driver
// mutex but was read here without it — a data race. The lookup is now
// performed under the lock, and the map is accessed only once.
func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
	d.Lock()
	active, ok := d.activeContainers[id]
	d.Unlock()
	if !ok {
		return nil, fmt.Errorf("%s is not a key in active containers", id)
	}
	// FIXME:
	return execdriver.Stats(d.containerDir(id), active.container.Cgroups.Memory, d.machineMemory)
}

View File

@@ -0,0 +1,51 @@
// +build linux
package clr
import (
"errors"
"strconv"
"strings"
)
var (
	// ErrCannotParse is returned when raw lkvm output does not match the
	// expected "<pid> <name> <state>" format.
	ErrCannotParse = errors.New("cannot parse raw input")
)

// clrInfo describes the state of one VM as reported by "lkvm list".
type clrInfo struct {
	Running bool // true when the state column reads "running"
	Pid     int  // pid parsed from the first column
}

// parseClrInfo parses a single line of "lkvm list" output for the named
// container. The line must consist of exactly three whitespace-separated
// fields — a numeric pid, the container name, and a state string — or
// ErrCannotParse is returned.
func parseClrInfo(name, raw string) (*clrInfo, error) {
	if raw == "" {
		return nil, ErrCannotParse
	}
	fields := strings.Fields(strings.TrimSpace(raw))
	// The format is expected to be:
	//
	//	<pid> <name> <state>
	//
	if len(fields) != 3 || fields[1] != name {
		return nil, ErrCannotParse
	}
	pid, err := strconv.Atoi(fields[0])
	if err != nil {
		return nil, ErrCannotParse
	}
	return &clrInfo{
		Pid:     pid,
		Running: fields[2] == "running",
	}, nil
}

View File

@@ -0,0 +1,97 @@
// +build linux
package clr
import (
"encoding/json"
"flag"
"fmt"
"os"
"strings"
"syscall"
)
// InitArgs are the arguments provided to the init function for a driver.
type InitArgs struct {
User string // username or uid to run as
Gateway string // gateway address
Ip string // ip address
WorkDir string // initial working directory (see setupWorkingDirectory)
Privileged bool // privileged mode
Env []string // workload environment, populated by setupEnv
Args []string // remaining positional arguments: the command to run
Mtu int // interface mtu
Console string
Pipe int
Root string
CapAdd string // capabilities to add
CapDrop string // capabilities to drop
}
// getArgs parses the command-line flags understood by the in-container
// init and packages them into an InitArgs value. Positional arguments
// (the command to execute) end up in Args.
func getArgs() *InitArgs {
	// Register cmdline flags, then parse os.Args.
	user := flag.String("u", "", "username or uid")
	gateway := flag.String("g", "", "gateway address")
	ip := flag.String("i", "", "ip address")
	workDir := flag.String("w", "", "workdir")
	privileged := flag.Bool("privileged", false, "privileged mode")
	mtu := flag.Int("mtu", 1500, "interface mtu")
	capAdd := flag.String("cap-add", "", "capabilities to add")
	capDrop := flag.String("cap-drop", "", "capabilities to drop")
	flag.Parse()

	args := new(InitArgs)
	args.User = *user
	args.Gateway = *gateway
	args.Ip = *ip
	args.WorkDir = *workDir
	args.Privileged = *privileged
	args.Args = flag.Args()
	args.Mtu = *mtu
	args.CapAdd = *capAdd
	args.CapDrop = *capDrop
	return args
}
// setupEnv resets the process environment to the variables decoded from
// the ./.dockerenv file, plus the plugin-specific "container" variable,
// clearing any environment pollution introduced by the launcher.
//
// NOTE(review): this decoder expects .dockerenv to contain a JSON array,
// but generateEnvConfig in the clr driver writes newline-separated
// KEY=VAL pairs — confirm which format .dockerenv actually carries.
func setupEnv(args *InitArgs) error {
// Get env
var env []string
dockerenv, err := os.Open(".dockerenv")
if err != nil {
return fmt.Errorf("Unable to load environment variables: %v", err)
}
defer dockerenv.Close()
if err := json.NewDecoder(dockerenv).Decode(&env); err != nil {
return fmt.Errorf("Unable to decode environment variables: %v", err)
}
// Propagate the plugin-specific container env variable
env = append(env, "container="+os.Getenv("container"))
args.Env = env
os.Clearenv()
// Re-populate the environment; values may themselves contain '=', so
// split each entry on the first '=' only.
for _, kv := range args.Env {
parts := strings.SplitN(kv, "=", 2)
if len(parts) == 1 {
parts = append(parts, "")
}
os.Setenv(parts[0], parts[1])
}
return nil
}
// setupWorkingDirectory changes into the working directory requested for
// the container process. An empty WorkDir means "keep the current
// directory" and is not an error.
func setupWorkingDirectory(args *InitArgs) error {
	dir := args.WorkDir
	if dir == "" {
		return nil
	}
	err := syscall.Chdir(dir)
	if err == nil {
		return nil
	}
	return fmt.Errorf("Unable to change dir to %v: %v", dir, err)
}

View File

@@ -7,7 +7,9 @@ import (
"time"
// TODO Windows: Factor out ulimit
"github.com/docker/docker/daemon/network"
"github.com/docker/docker/pkg/ulimit"
"github.com/docker/docker/runconfig"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/configs"
)
@@ -150,31 +152,34 @@ type ProcessConfig struct {
//
// Process wrapps an os/exec.Cmd to add more metadata
type Command struct {
ID string `json:"id"`
Rootfs string `json:"rootfs"` // root fs of the container
ReadonlyRootfs bool `json:"readonly_rootfs"`
InitPath string `json:"initpath"` // dockerinit
WorkingDir string `json:"working_dir"`
ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver
Network *Network `json:"network"`
Ipc *Ipc `json:"ipc"`
Pid *Pid `json:"pid"`
UTS *UTS `json:"uts"`
Resources *Resources `json:"resources"`
Mounts []Mount `json:"mounts"`
AllowedDevices []*configs.Device `json:"allowed_devices"`
AutoCreatedDevices []*configs.Device `json:"autocreated_devices"`
CapAdd []string `json:"cap_add"`
CapDrop []string `json:"cap_drop"`
GroupAdd []string `json:"group_add"`
ContainerPid int `json:"container_pid"` // the pid for the process inside a container
ProcessConfig ProcessConfig `json:"process_config"` // Describes the init process of the container.
ProcessLabel string `json:"process_label"`
MountLabel string `json:"mount_label"`
LxcConfig []string `json:"lxc_config"`
AppArmorProfile string `json:"apparmor_profile"`
CgroupParent string `json:"cgroup_parent"` // The parent cgroup for this command.
FirstStart bool `json:"first_start"`
LayerPaths []string `json:"layer_paths"` // Windows needs to know the layer paths and folder for a command
LayerFolder string `json:"layer_folder"`
ID string `json:"id"`
Rootfs string `json:"rootfs"` // root fs of the container
ReadonlyRootfs bool `json:"readonly_rootfs"`
InitPath string `json:"initpath"` // dockerinit
WorkingDir string `json:"working_dir"`
ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver
Network *Network `json:"network"`
Ipc *Ipc `json:"ipc"`
Pid *Pid `json:"pid"`
UTS *UTS `json:"uts"`
Resources *Resources `json:"resources"`
Mounts []Mount `json:"mounts"`
AllowedDevices []*configs.Device `json:"allowed_devices"`
AutoCreatedDevices []*configs.Device `json:"autocreated_devices"`
CapAdd []string `json:"cap_add"`
CapDrop []string `json:"cap_drop"`
GroupAdd []string `json:"group_add"`
ContainerPid int `json:"container_pid"` // the pid for the process inside a container
ProcessConfig ProcessConfig `json:"process_config"` // Describes the init process of the container.
ProcessLabel string `json:"process_label"`
MountLabel string `json:"mount_label"`
LxcConfig []string `json:"lxc_config"`
AppArmorProfile string `json:"apparmor_profile"`
CgroupParent string `json:"cgroup_parent"` // The parent cgroup for this command.
FirstStart bool `json:"first_start"`
LayerPaths []string `json:"layer_paths"` // Windows needs to know the layer paths and folder for a command
LayerFolder string `json:"layer_folder"`
NetworkSettings *network.Settings `json:"network_settings"`
EndpointInfo []map[string]interface{} `json:"endpoint_info"`
HostConfig *runconfig.HostConfig
}

View File

@@ -8,13 +8,17 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/daemon/execdriver/clr"
"github.com/docker/docker/daemon/execdriver/lxc"
"github.com/docker/docker/daemon/execdriver/native"
"github.com/docker/docker/pkg/sysinfo"
)
func NewDriver(name string, options []string, root, libPath, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) {
rootPath := path.Join(root, "execdriver", name)
switch name {
case "clr":
return clr.NewDriver(rootPath, libPath, initPath, sysInfo.AppArmor)
case "lxc":
// we want to give the lxc driver the full docker root because it needs
// to access and write config and template files in /var/lib/docker/containers/*
@@ -22,7 +26,7 @@ func NewDriver(name string, options []string, root, libPath, initPath string, sy
logrus.Warn("LXC built-in support is deprecated.")
return lxc.NewDriver(root, libPath, initPath, sysInfo.AppArmor)
case "native":
return native.NewDriver(path.Join(root, "execdriver", "native"), initPath, options)
return native.NewDriver(rootPath, initPath, options)
}
return nil, fmt.Errorf("unknown exec driver %s", name)
}

View File

@@ -138,6 +138,30 @@ func (m *containerMonitor) Start() error {
m.lastStartTime = time.Now()
// Make the network settings available to the execution
// driver to allow for integration with libnetwork networking.
m.container.command.NetworkSettings = m.container.NetworkSettings
// Allow the execution driver to query memory limits
m.container.command.HostConfig = m.container.hostConfig
// Make the network endpoint details available to the execution
// driver as well.
n, _err := m.container.daemon.netController.NetworkByID(m.container.NetworkSettings.NetworkID)
if _err == nil {
var eps []map[string]interface{}
for _, ep := range n.Endpoints() {
info, err := ep.DriverInfo()
if err != nil {
continue
}
eps = append(eps, info)
}
m.container.command.EndpointInfo = eps
}
if exitStatus, err = m.container.daemon.Run(m.container, pipes, m.callback); err != nil {
// if we receive an internal error from the initial start of a container then lets
// return it instead of entering the restart loop

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"errors"
"io/ioutil"
"os"
)
var (
@@ -12,12 +13,22 @@ var (
// file to check to determine Operating System
etcOsRelease = "/etc/os-release"
// used by stateless systems like Clear Linux
altEtcOSRelease = "/usr/lib/os-release"
)
func GetOperatingSystem() (string, error) {
b, err := ioutil.ReadFile(etcOsRelease)
if err != nil {
return "", err
if _, err2 := os.Stat(altEtcOSRelease); err2 == nil {
b, err2 = ioutil.ReadFile(altEtcOSRelease)
if err2 != nil {
return "", err2
}
} else {
return "", err
}
}
if i := bytes.Index(b, []byte("PRETTY_NAME")); i >= 0 {
b = b[i+13:]

View File

@@ -89,6 +89,7 @@ type bridgeNetwork struct {
config *networkConfiguration
endpoints map[types.UUID]*bridgeEndpoint // key: endpoint id
portMapper *portmapper.PortMapper
veth *netlink.Veth
sync.Mutex
}
@@ -894,11 +895,13 @@ func (d *driver) CreateEndpoint(nid, eid types.UUID, epInfo driverapi.EndpointIn
return err
}
logrus.Warnf("network %v", n)
logrus.Warnf("veth %v", n.veth)
// Generate and add the interface pipe host <-> sandbox
veth := &netlink.Veth{
n.veth = &netlink.Veth{
LinkAttrs: netlink.LinkAttrs{Name: hostIfName, TxQLen: 0},
PeerName: containerIfName}
if err = netlink.LinkAdd(veth); err != nil {
if err = netlink.LinkAdd(n.veth); err != nil {
return err
}
@@ -1163,6 +1166,11 @@ func (d *driver) EndpointOperInfo(nid, eid types.UUID) (map[string]interface{},
m[netlabel.MacAddress] = ep.macAddress
}
// Add details of the bridge
m[netlabel.BridgeName] = n.config.BridgeName
m[netlabel.BridgePeername] = n.veth.PeerName
m[netlabel.BridgeLinkName] = n.veth.LinkAttrs.Name
return m, nil
}

View File

@@ -24,6 +24,17 @@ const (
//EnableIPv6 constant represents enabling IPV6 at network level
EnableIPv6 = Prefix + ".enable_ipv6"
// BridgeName constant represents the name of the network bridge
BridgeName = "io.docker.network.bridge.name"
// BridgePeername constant represents the interface name provided to
// the container.
BridgePeername = "io.docker.network.bridge.peername"
// BridgeLinkName constant represents the interface name created on
// the host side.
BridgeLinkName = "io.docker.network.bridge.linkname"
// KVProvider constant represents the KV provider backend
KVProvider = DriverPrefix + ".kv_provider"