Testing now in different places whether we should mount a volume

This commit is contained in:
chrislovecnm 2017-08-21 13:28:21 -06:00
parent b5e96ae63b
commit 7adee24ba3
4 changed files with 119 additions and 227 deletions

View File

@ -164,7 +164,73 @@ func newEc2Filter(name string, value string) *ec2.Filter {
func (a *AWSVolumes) findVolumes(request *ec2.DescribeVolumesInput) ([]*Volume, error) {
var volumes []*Volume
err := a.ec2.DescribeVolumesPages(request, func(p *ec2.DescribeVolumesOutput, lastPage bool) (shouldContinue bool) {
volumes = a.findEctdVolumes(p, volumes)
for _, v := range p.Volumes {
volumeID := aws.StringValue(v.VolumeId)
vol := &Volume{
ID: volumeID,
Info: VolumeInfo{
Description: volumeID,
},
}
state := aws.StringValue(v.State)
vol.Status = state
for _, attachment := range v.Attachments {
vol.AttachedTo = aws.StringValue(attachment.InstanceId)
if aws.StringValue(attachment.InstanceId) == a.instanceId {
vol.LocalDevice = aws.StringValue(attachment.Device)
}
}
// never mount root volumes
// these are volumes that aws sets aside for root volumes mount points
if vol.LocalDevice == "/dev/sda1" || vol.LocalDevice == "/dev/xvda" {
glog.Warningf("Not mounting: %q, since it is a root volume", vol.LocalDevice)
continue
}
skipVolume := false
for _, tag := range v.Tags {
k := aws.StringValue(tag.Key)
v := aws.StringValue(tag.Value)
switch k {
case awsup.TagClusterName, "Name":
{
// Ignore
}
//case TagNameMasterId:
// id, err := strconv.Atoi(v)
// if err != nil {
// glog.Warningf("error parsing master-id tag on volume %q %s=%s; skipping volume", volumeID, k, v)
// skipVolume = true
// } else {
// vol.Info.MasterID = id
// }
default:
if strings.HasPrefix(k, awsup.TagNameEtcdClusterPrefix) {
etcdClusterName := strings.TrimPrefix(k, awsup.TagNameEtcdClusterPrefix)
spec, err := ParseEtcdClusterSpec(etcdClusterName, v)
if err != nil {
// Fail safe
glog.Warningf("error parsing etcd cluster tag %q on volume %q; skipping volume: %v", v, volumeID, err)
skipVolume = true
}
vol.Info.EtcdClusters = append(vol.Info.EtcdClusters, spec)
} else if strings.HasPrefix(k, awsup.TagNameRolePrefix) {
// Ignore
} else {
glog.Warningf("unknown tag on volume %q: %s=%s", volumeID, k, v)
}
}
}
if !skipVolume {
volumes = append(volumes, vol)
}
}
return true
})
@ -174,75 +240,6 @@ func (a *AWSVolumes) findVolumes(request *ec2.DescribeVolumesInput) ([]*Volume,
return volumes, nil
}
// findEctdVolumes scans one page of DescribeVolumes results and appends to
// volumes only those volumes that carry a parseable etcd cluster tag
// (keys prefixed with awsup.TagNameEtcdClusterPrefix). All other volumes are
// skipped, so only etcd data volumes are ever returned for mounting.
// NOTE: the method name carries a pre-existing typo ("Ectd"); it is kept
// unchanged because callers reference it.
func (a *AWSVolumes) findEctdVolumes(p *ec2.DescribeVolumesOutput, volumes []*Volume) []*Volume {
	for _, v := range p.Volumes {
		volumeID := aws.StringValue(v.VolumeId)
		vol := &Volume{
			ID: volumeID,
			Info: VolumeInfo{
				Description: volumeID,
			},
		}
		vol.Status = aws.StringValue(v.State)

		// Record the device path only for attachments to this instance.
		for _, attachment := range v.Attachments {
			vol.AttachedTo = aws.StringValue(attachment.InstanceId)
			if aws.StringValue(attachment.InstanceId) == a.instanceId {
				vol.LocalDevice = aws.StringValue(attachment.Device)
			}
		}

		// Skip by default; flipped to false only when a valid etcd tag is found.
		skipVolume := true
		// looking for correct etcd tags
		for _, tag := range v.Tags {
			k := aws.StringValue(tag.Key)
			// Renamed from `v` to avoid shadowing the volume loop variable above.
			tagValue := aws.StringValue(tag.Value)
			switch k {
			case awsup.TagClusterName, "Name":
				// Ignore
				glog.V(8).Infof("Ignoring tag: %q", k)
			default:
				if strings.HasPrefix(k, awsup.TagNameEtcdClusterPrefix) {
					etcdClusterName := strings.TrimPrefix(k, awsup.TagNameEtcdClusterPrefix)
					spec, err := ParseEtcdClusterSpec(etcdClusterName, tagValue)
					if err != nil {
						// Fail safe: warn but leave skipVolume=true so a volume
						// with only a malformed etcd tag is not mounted.
						glog.Warningf("error parsing etcd cluster tag %q on volume %q; skipping volume: %v", tagValue, volumeID, err)
					} else {
						glog.V(8).Infof("adding volume etcd cluster: %q, volume id: %q", spec.ClusterKey, volumeID)
						// found etcd volume and adding the drive
						vol.Info.EtcdClusters = append(vol.Info.EtcdClusters, spec)
						skipVolume = false
						// The original code had a `break` here; inside a switch it
						// only exited the switch (a no-op), so it has been removed.
						// We keep scanning the remaining tags.
					}
				} else if strings.HasPrefix(k, awsup.TagNameRolePrefix) {
					// Ignore
					glog.V(8).Infof("Ignoring tag: %q", k)
				} else {
					glog.Warningf("unknown tag on volume %q: %s=%s", volumeID, k, tagValue)
				}
			}
		}
		if skipVolume {
			glog.Infof("Skipping volume id:%q", vol.ID)
		} else {
			glog.Infof("Adding volume id:%q", vol.ID)
			volumes = append(volumes, vol)
		}
	}
	return volumes
}
//func (a *AWSVolumes) FindMountedVolumes() ([]*Volume, error) {
// request := &ec2.DescribeVolumesInput{}
// request.Filters = []*ec2.Filter{
@ -329,7 +326,6 @@ func (a *AWSVolumes) AttachVolume(volume *Volume) error {
}
// Wait (forever) for volume to attach or reach a failure-to-attach condition
// FIXME we need to have this timeout. As an error can cause an infinite loop.
for {
request := &ec2.DescribeVolumesInput{
VolumeIds: []*string{&volumeID},

View File

@ -1,145 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protokube
import (
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
)
// Test_Find_ETCD_Volumes verifies that findEctdVolumes keeps only volumes
// tagged with k8s.io/etcd/* keys: of the four fixtures below, exactly the
// etcd-main and etcd-events volumes should be returned.
func Test_Find_ETCD_Volumes(t *testing.T) {
	awsVolumes := &AWSVolumes{
		instanceId: "i-0dc7301acf2dfbc0c",
	}
	p := &ec2.DescribeVolumesOutput{
		Volumes: []*ec2.Volume{
			// etcd-main volume: has a k8s.io/etcd/main tag -> must be returned.
			{
				VolumeId: aws.String("vol-0c681fe311525b927"),
				Tags: []*ec2.Tag{
					{
						Key:   aws.String("KubernetesCluster"),
						Value: aws.String("k8s.example.com"),
					},
					{
						Key:   aws.String("Name"),
						Value: aws.String("d.etcd-main.example.com"),
					},
					{
						Key:   aws.String("k8s.io/etcd/main"),
						Value: aws.String("d/d"),
					},
					{
						Key:   aws.String("k8s.io/role/master"),
						Value: aws.String("1"),
					},
				},
				Attachments: []*ec2.VolumeAttachment{
					{
						InstanceId: aws.String("i-0dc7301acf2dfbc0c"),
					},
				},
				VolumeType: aws.String("gp2"),
			},
			// Master volume with no etcd tag -> must be skipped.
			{
				VolumeId: aws.String("vol-0c681fe311525b926"),
				Tags: []*ec2.Tag{
					{
						Key:   aws.String("KubernetesCluster"),
						Value: aws.String("k8s.example.com"),
					},
					{
						Key:   aws.String("Name"),
						Value: aws.String("master-us-east-1d.masters.k8s.example.com"),
					},
					{
						Key:   aws.String("k8s.io/role/master"),
						Value: aws.String("1"),
					},
				},
				Attachments: []*ec2.VolumeAttachment{
					{
						InstanceId: aws.String("i-0dc7301acf2dfbc0c"),
					},
				},
				VolumeType: aws.String("gp2"),
			},
			// Volume with only unknown tags -> must be skipped.
			{
				VolumeId: aws.String("vol-0c681fe311525b928"),
				Tags: []*ec2.Tag{
					{
						Key:   aws.String("KubernetesClusters"),
						Value: aws.String("k8s.example.com"),
					},
					{
						Key:   aws.String("NameFOO"),
						Value: aws.String("master-us-east-1d.masters.k8s.example.com"),
					},
					{
						Key:   aws.String("k8s.io/role/bar"),
						Value: aws.String("1"),
					},
				},
				Attachments: []*ec2.VolumeAttachment{
					{
						InstanceId: aws.String("i-0dc7301acf2dfbc0c"),
					},
				},
				VolumeType: aws.String("gp2"),
			},
			// etcd-events volume: has a k8s.io/etcd/events tag -> must be returned.
			// BUG FIX: previously reused the ID of the master volume above
			// ("...b926"); EBS volume IDs are unique, and the duplicate made
			// failures ambiguous.
			{
				VolumeId: aws.String("vol-0c681fe311525b929"),
				Tags: []*ec2.Tag{
					{
						Key:   aws.String("KubernetesCluster"),
						Value: aws.String("k8s.example.com"),
					},
					{
						Key:   aws.String("Name"),
						Value: aws.String("d.etcd-events.example.com"),
					},
					{
						Key:   aws.String("k8s.io/etcd/events"),
						Value: aws.String("d/d"),
					},
					{
						Key:   aws.String("k8s.io/role/master"),
						Value: aws.String("1"),
					},
				},
				Attachments: []*ec2.VolumeAttachment{
					{
						InstanceId: aws.String("i-0dc7301acf2dfbc0c"),
					},
				},
				VolumeType: aws.String("gp2"),
			},
		},
	}
	var volumes []*Volume
	volumes = awsVolumes.findEctdVolumes(p, volumes)
	if len(volumes) != 2 {
		// Report the actual count so a failure is diagnosable.
		t.Fatalf("expected 2 etcd volumes, got %d", len(volumes))
	}
}

View File

@ -52,12 +52,6 @@ func (k *VolumeMountController) mountMasterVolumes() ([]*Volume, error) {
}
for _, v := range attached {
// Do not ever try to mount root devices
if v.LocalDevice == "/dev/sda1" || v.LocalDevice == "/dev/xvda" {
glog.Warningf("local device: %q, volume id: %q is being skipped and will not mounted, since it is a root volume", v.LocalDevice, v.ID)
continue
}
existing := k.mounted[v.ID]
if existing != nil {
continue
@ -99,11 +93,6 @@ func (k *VolumeMountController) mountMasterVolumes() ([]*Volume, error) {
}
func (k *VolumeMountController) safeFormatAndMount(device string, mountpoint string, fstype string) error {
// Do not attempt to mount root volumes ever
if device == "/dev/sda1" || device == "/dev/xvda" {
glog.Warningf("volume: %q is being skipped and will not be formatted and mounted, since it is a root volume", device)
return nil
}
// Wait for the device to show up
for {
_, err := os.Stat(pathFor(device))
@ -196,6 +185,10 @@ func (k *VolumeMountController) attachMasterVolumes() ([]*Volume, error) {
var tryAttach []*Volume
var attached []*Volume
for _, v := range volumes {
if doNotMountVolume(v) {
continue
}
if v.AttachedTo == "" {
tryAttach = append(tryAttach, v)
}
@ -256,6 +249,15 @@ func (k *VolumeMountController) attachMasterVolumes() ([]*Volume, error) {
return attached, nil
}
// doNotMountVolume reports whether the volume should be skipped for mounting.
// A volume is only eligible for mounting when at least one etcd cluster is
// associated with it; volumes with no etcd clusters are skipped with a warning.
func doNotMountVolume(v *Volume) bool {
	if len(v.Info.EtcdClusters) > 0 {
		return false
	}
	glog.Warningf("Local device: %q, volume id: %q is being skipped and will not mounted, since it does not have a etcd cluster", v.LocalDevice, v.ID)
	return true
}
// ByEtcdClusterName sorts volumes so that we mount in a consistent order,
// and in addition we try to mount the main etcd volume before the events etcd volume
type ByEtcdClusterName []*Volume

View File

@ -57,3 +57,42 @@ func Test_VolumeSort_ByEtcdClusterName(t *testing.T) {
}
}
// Test_Mount_Volumes verifies doNotMountVolume: a volume with no etcd
// cluster must be skipped (true), a volume with an etcd cluster must be
// mounted (false).
func Test_Mount_Volumes(t *testing.T) {
	grid := []struct {
		volume      *Volume
		doNotMount  bool
		description string
	}{
		{
			&Volume{
				LocalDevice: "/dev/xvda",
			},
			true,
			"xvda without an etcd cluster, do not mount",
		},
		{
			&Volume{
				LocalDevice: "/dev/xvdb",
				Info: VolumeInfo{
					EtcdClusters: []*EtcdClusterSpec{
						{
							ClusterKey: "foo",
							NodeName:   "bar",
						},
					},
				},
			},
			// BUG FIX: this case previously expected true, contradicting its
			// own description — a volume WITH an etcd cluster must be mounted.
			false,
			"xvdb with an etcd cluster, mount",
		},
	}
	for _, g := range grid {
		// BUG FIX: the original assertion only caught d && !g.doNotMount and
		// silently passed when a volume that should be skipped was mounted.
		if got := doNotMountVolume(g.volume); got != g.doNotMount {
			t.Fatalf("doNotMountVolume(%q) = %v, want %v, description: %s", g.volume.LocalDevice, got, g.doNotMount, g.description)
		}
	}
}