mirror of https://github.com/kubernetes/kops.git
Changes
- Removed all of the systemd unit creation and instead use the volume mount code from the kubelet (SafeFormatAndMount)
- Added some documentation to highlight the feature and show how it might be used with both EBS and ephemeral storage
parent df2d8dd304
commit 6c814f3e73

@@ -112,6 +112,101 @@ spec:
  rootVolumeIops: 200
```

## Adding additional storage to the instance groups

You can add additional storage _(note: presently confined to AWS)_ via the instancegroup specification.

```YAML
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  labels:
    kops.k8s.io/cluster: my-beloved-cluster
  name: compute
spec:
  cloudLabels:
    role: compute
  image: coreos.com/CoreOS-stable-1855.4.0-hvm
  machineType: m4.large
  ...
  volumes:
  - device: /dev/xvdd
    encrypted: true
    size: 20
    type: gp2
```

In AWS the above will add an additional 20GB EBS volume to the launch configuration, and thus to each node within the instancegroup.
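
For context, a rollout of the change above might look like the following sketch; this assumes the instance group from the example is named `compute` and that your cluster name and `KOPS_STATE_STORE` are already configured:

```shell
# edit the instance group and add the volumes section shown above
kops edit ig compute

# apply the change to the launch configuration
kops update cluster --yes

# replace the existing nodes so they come up with the new volume attached
kops rolling-update cluster --yes
```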

## Automatically formatting and mounting the additional storage

You can add additional storage via the above `volumes` collection, though this only provisions the storage itself. Assuming you don't wish to handle the mechanics of formatting and mounting the device yourself _(perhaps via a hook)_, you can use the `volumeMounts` section of the instancegroup to handle this for you.

```YAML
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  labels:
    kops.k8s.io/cluster: my-beloved-cluster
  name: compute
spec:
  cloudLabels:
    role: compute
  image: coreos.com/CoreOS-stable-1855.4.0-hvm
  machineType: m4.large
  ...
  volumeMounts:
  - device: /dev/xvdd
    filesystem: ext4
    path: /var/lib/docker
  volumes:
  - device: /dev/xvdd
    encrypted: true
    size: 20
    type: gp2
```

The above will provision the additional storage, then format and mount the device on the node. Note this feature is purposely kept distinct from `volumes` so that it may be reused in areas such as ephemeral storage. Using a `c5d.large` instance as an example, which comes with a 50GB NVMe SSD drive, we can use `volumeMounts` to mount it into `/var/lib/docker` for us.

```YAML
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  labels:
    kops.k8s.io/cluster: my-beloved-cluster
  name: compute
spec:
  cloudLabels:
    role: compute
  image: coreos.com/CoreOS-stable-1855.4.0-hvm
  machineType: c5d.large
  ...
  volumeMounts:
  - device: /dev/xvdd
    filesystem: ext4
    path: /data
  # -- mount the instance storage --
  - device: /dev/nvme2n1
    filesystem: ext4
    path: /var/lib/docker
  volumes:
  - device: /dev/nvme1n1
    encrypted: true
    size: 20
    type: gp2
```

```shell
$ df -h | grep nvme[12]
/dev/nvme1n1     20G   45M   20G   1% /data
/dev/nvme2n1     46G  633M   45G   2% /var/lib/docker
```

> Note: at present it's up to the user to ensure the correct device names.
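
As a sanity check against device-naming surprises _(for example, EBS volumes appearing as `/dev/nvme*` on Nitro-based instance types rather than the requested `/dev/xvd*` name)_, you can inspect the block devices on a node before settling on the `volumeMounts` entries; `lsblk` is one way to do so:

```shell
# list the block devices with their sizes and any existing mountpoints
lsblk -o NAME,SIZE,TYPE,MOUNTPOINT
```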

## Creating a new instance group

Suppose you want to add a new group of nodes, perhaps with a different instance type. You do this using `kops create ig <InstanceGroupName> --subnet <zone(s)>`. Currently the

@@ -66,6 +66,7 @@ go_library(
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
        "//vendor/k8s.io/kubernetes/pkg/util/mount:go_default_library",
    ],
)

@@ -31,6 +31,7 @@ import (
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
	"k8s.io/kops/util/pkg/vfs"
	"k8s.io/kubernetes/pkg/util/mount"

	"github.com/blang/semver"
	"github.com/golang/glog"

@@ -103,6 +104,43 @@ func (c *NodeupModelContext) EnsureSystemdSuffix(name string) string {
	return name
}

// EnsureDirectory ensures the directory exists or creates it
func (c *NodeupModelContext) EnsureDirectory(path string) error {
	st, err := os.Stat(path)
	if err != nil {
		if os.IsNotExist(err) {
			return os.MkdirAll(path, 0755)
		}

		return err
	}

	if !st.IsDir() {
		return fmt.Errorf("path: %s already exists but is not a directory", path)
	}

	return nil
}

// IsMounted checks if the device is already mounted at the given path
func (c *NodeupModelContext) IsMounted(m mount.Interface, device, path string) (bool, error) {
	list, err := m.List()
	if err != nil {
		return false, err
	}

	for _, x := range list {
		if x.Device == device {
			glog.V(3).Infof("Found mountpoint device: %s, path: %s, type: %s", x.Device, x.Path, x.Type)
			if strings.TrimSuffix(x.Path, "/") == strings.TrimSuffix(path, "/") {
				return true, nil
			}
		}
	}

	return false, nil
}

// PathSrvKubernetes returns the path for the kubernetes service files
func (c *NodeupModelContext) PathSrvKubernetes() string {
	switch c.Distribution {

@@ -123,11 +123,6 @@ func (h *HookBuilder) buildSystemdService(name string, hook *kops.HookSpec) (*no
		unit.Set("Unit", "Before", x)
	}

	if h.UseVolumeMounts() {
		unit.Set("Unit", "Requires", h.VolumesServiceName())
		unit.Set("Unit", "After", h.VolumesServiceName())
	}

	// are we a raw unit file or a docker exec?
	switch hook.ExecContainer {
	case nil:

@@ -168,11 +163,6 @@ func (h *HookBuilder) buildDockerService(unit *systemd.Manifest, hook *kops.Hook
	dockerPullCommand := systemd.EscapeCommand([]string{"/usr/bin/docker", "pull", hook.ExecContainer.Image})

	unit.Set("Unit", "Requires", "docker.service")
	if h.UseVolumeMounts() {
		unit.Set("Unit", "Requires", h.VolumesServiceName())
		unit.Set("Unit", "After", h.VolumesServiceName())
	}

	unit.Set("Service", "ExecStartPre", dockerPullCommand)
	unit.Set("Service", "ExecStart", dockerRunCommand)
	unit.Set("Service", "Type", "oneshot")

@@ -250,11 +250,6 @@ func (b *KubeletBuilder) buildSystemdService() *nodetasks.Service {
	manifest.Set("Unit", "Documentation", "https://github.com/kubernetes/kubernetes")
	manifest.Set("Unit", "After", "docker.service")

	if b.UseVolumeMounts() {
		manifest.Set("Unit", "Requires", b.VolumesServiceName())
		manifest.Set("Unit", "After", b.VolumesServiceName())
	}

	if b.Distribution == distros.DistributionCoreOS {
		// We add /opt/kubernetes/bin for our utilities (socat, conntrack)
		manifest.Set("Service", "Environment", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/kubernetes/bin")

@@ -80,12 +80,6 @@ func (b *NodeAuthorizationBuilder) Build(c *fi.ModelBuilderContext) error {
	man.Set("Unit", "After", "docker.service")
	man.Set("Unit", "Before", "kubelet.service")

	// @step: ensure we start after the volumes have been mounted
	if b.UseVolumeMounts() {
		man.Set("Unit", "Requires", b.VolumesServiceName())
		man.Set("Unit", "After", b.VolumesServiceName())
	}

	clientCert := filepath.Join(b.PathSrvKubernetes(), authorizerDir, "tls.pem")
	man.Set("Service", "Type", "oneshot")
	man.Set("Service", "RemainAfterExit", "yes")

@@ -141,10 +141,6 @@ func (t *ProtokubeBuilder) buildSystemdService() (*nodetasks.Service, error) {
	manifest.Set("Unit", "Documentation", "https://github.com/kubernetes/kops")

	// @step: we need a dependency on any volumes being mounted first
	if t.UseVolumeMounts() {
		manifest.Set("Unit", "Requires", t.VolumesServiceName())
		manifest.Set("Unit", "After", t.VolumesServiceName())
	}
	manifest.Set("Service", "ExecStartPre", t.ProtokubeImagePullCommand())
	manifest.Set("Service", "ExecStart", protokubeCommand)
	manifest.Set("Service", "Restart", "always")

@@ -18,14 +18,11 @@ package model

import (
	"fmt"
	"strings"

	"k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/pkg/systemd"
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/util/mount"
)

// VolumesBuilder maintains the volume mounting

@@ -39,113 +36,39 @@ var _ fi.ModelBuilder = &VolumesBuilder{}

func (b *VolumesBuilder) Build(c *fi.ModelBuilderContext) error {
	// @step: check if the instancegroup has any volumes to mount
	if !b.UseVolumeMounts() {
		glog.V(1).Info("skipping the volume builder, no volumes defined for this instancegroup")
		glog.V(1).Info("Skipping the volume builder, no volumes defined for this instancegroup")

		return nil
	}

	var mountUnits []string

	// @step: iterate the volume mounts and create the format and mount units
	// @step: iterate the volume mounts and attempt to mount the devices
	for _, x := range b.InstanceGroup.Spec.VolumeMounts {
		glog.V(2).Infof("attempting to provision device: %s, path: %s", x.Device, x.Path)

		// @step: create the formatting unit
		fsvc, err := buildDeviceFormatService(c, x)
		if err != nil {
			return fmt.Errorf("failed to provision format service for device: %s, error: %s", x.Device, err)
		}
		c.AddTask(fsvc)

		msvc, err := buildDeviceMountService(c, x, fsvc.Name)
		if err != nil {
			return fmt.Errorf("failed to provision format service for device: %s, error: %s", x.Device, err)
		}
		c.AddTask(msvc)

		mountUnits = append(mountUnits, msvc.Name)
		// @check the directory exists, else create it
		if err := b.EnsureDirectory(x.Path); err != nil {
			return fmt.Errorf("Failed to ensure the directory: %s, error: %s", x.Path, err)
		}

		// @step: create a unit to restart the docker daemon once everything is mounted
		u := &systemd.Manifest{}
		u.Set("Unit", "Description", "Used to start the docker daemon post volume mounts")
		for _, x := range mountUnits {
			u.Set("Unit", "After", x)
			u.Set("Unit", "Requires", x)
		m := &mount.SafeFormatAndMount{
			Exec:      mount.NewOsExec(),
			Interface: mount.New(""),
		}
		u.Set("Service", "Type", "oneshot")
		u.Set("Service", "RemainAfterExit", "yes")
		u.Set("Service", "ExecStartPre", "/usr/bin/systemctl restart docker.service")
		u.Set("Service", "ExecStart", "/usr/bin/systemctl restart --no-block kops-configuration.service")

		c.AddTask(&nodetasks.Service{
			Name:         b.EnsureSystemdSuffix(b.VolumesServiceName()),
			Definition:   s(u.Render()),
			Enabled:      fi.Bool(true),
			ManageState:  fi.Bool(true),
			Running:      fi.Bool(true),
			SmartRestart: fi.Bool(true),
		})
		// @check if the device is already mounted
		if found, err := b.IsMounted(m, x.Device, x.Path); err != nil {
			return fmt.Errorf("Failed to check if device: %s is mounted, error: %s", x.Device, err)
		} else if found {
			glog.V(3).Infof("Skipping device: %s, path: %s as already mounted", x.Device, x.Path)
			continue
		}

		glog.Infof("Attempting to format and mount device: %s, path: %s", x.Device, x.Path)

		if err := m.FormatAndMount(x.Device, x.Path, x.Filesystem, x.MountOptions); err != nil {
			glog.Errorf("Failed to mount the device: %s on: %s, error: %s", x.Device, x.Path, err)

			return err
		}
	}

	return nil
}

// buildDeviceFormatService is responsible for constructing the systemd unit to format the device
func buildDeviceFormatService(c *fi.ModelBuilderContext, volume *kops.VolumeMountSpec) (*nodetasks.Service, error) {
	device := volume.Device
	deviceFmt := strings.TrimPrefix(strings.Replace(device, "/", "-", -1), "-")
	name := fmt.Sprintf("format-%s.service", deviceFmt)

	u := &systemd.Manifest{}
	u.Set("Unit", "Description", fmt.Sprintf("Formats the device: %s", device))
	u.Set("Unit", "After", fmt.Sprintf("%s.device", deviceFmt))
	u.Set("Unit", "Requires", fmt.Sprintf("%s.device", deviceFmt))
	u.Set("Service", "Type", "oneshot")
	u.Set("Service", "RemainAfterExit", "yes")

	// @TODO this was written to work on CoreOS, need to check other OSes; add a switch on the distro and potentially an api override
	command := fmt.Sprintf("/usr/bin/bash -c '/usr/sbin/blkid %s || (/usr/sbin/wipefs -f %s && /usr/sbin/mkfs.%s %s)'",
		device, device, volume.Filesystem, device)

	u.Set("Service", "ExecStart", command)

	return &nodetasks.Service{
		Name:         name,
		Definition:   s(u.Render()),
		Enabled:      fi.Bool(true),
		ManageState:  fi.Bool(true),
		Running:      fi.Bool(true),
		SmartRestart: fi.Bool(true),
	}, nil
}

// buildDeviceMountService is responsible for building the mount service
func buildDeviceMountService(c *fi.ModelBuilderContext, volume *kops.VolumeMountSpec, formatName string) (*nodetasks.Service, error) {
	device := volume.Device
	mountpath := volume.Path
	name := fmt.Sprintf("%s.mount", strings.TrimPrefix(strings.Replace(mountpath, "/", "-", -1), "-"))

	// @step: create the mounting unit
	u := &systemd.Manifest{}
	u.Set("Unit", "Description", fmt.Sprintf("Mounting volume: %s from device: %s", mountpath, device))
	u.Set("Unit", "Requires", formatName)
	u.Set("Unit", "After", formatName)
	u.Set("Unit", "Before", "docker.service")
	u.Set("Mount", "What", device)
	u.Set("Mount", "Where", mountpath)
	u.Set("Mount", "Type", volume.Filesystem)
	u.Set("Mount", "Options", "defaults")

	return &nodetasks.Service{
		Name:         name,
		Definition:   s(u.Render()),
		Enabled:      fi.Bool(true),
		ManageState:  fi.Bool(true),
		Running:      fi.Bool(true),
		SmartRestart: fi.Bool(true),
	}, nil
}

func santizeDeviceName(name string) string {
	return strings.TrimPrefix(strings.Replace(name, "/", "-", -1), "-")
}

@@ -803,13 +803,11 @@ func DeepValidate(c *kops.Cluster, groups []*kops.InstanceGroup, strict bool) er
			return errs[0]
		}
	default:
		for _, x := range groups {
			if len(x.Spec.Volumes) > 0 {
			if len(g.Spec.Volumes) > 0 {
				return errors.New("instancegroup volumes are only available with aws at present")
			}
		}
	}

	return nil
}