/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package smb

import (
	"fmt"
	"io/ioutil"
	"os"
	"runtime"
	"strings"
	"time"

	"github.com/container-storage-interface/spec/lib/go/csi"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/volume"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"golang.org/x/net/context"
)

// NodePublishVolume mounts the volume from the staging path to the target path.
func (d *Driver) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
	if req.GetVolumeCapability() == nil {
		return nil, status.Error(codes.InvalidArgument, "Volume capability missing in request")
	}
	volumeID := req.GetVolumeId()
	if len(volumeID) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
	}

	target := req.GetTargetPath()
	if len(target) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Target path not provided")
	}

	source := req.GetStagingTargetPath()
	if len(source) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Staging target not provided")
	}

	mountOptions := []string{"bind"}
	if req.GetReadonly() {
		mountOptions = append(mountOptions, "ro")
	}
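
	// The SMB share itself was already mounted at the staging path during
	// NodeStageVolume; publish only bind mounts that staging path into the
	// pod's target path (read-only when requested). The early return below
	// keeps repeated publish calls idempotent, as the CSI spec requires.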
	mnt, err := d.ensureMountPoint(target)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not mount target %q: %v", target, err)
	}
	if mnt {
		klog.V(2).Infof("NodePublishVolume: %s is already mounted", target)
		return &csi.NodePublishVolumeResponse{}, nil
	}

	if err = preparePublishPath(target, d.mounter); err != nil {
		return nil, fmt.Errorf("prepare publish failed for %s with error: %v", target, err)
	}

	klog.V(2).Infof("NodePublishVolume: mounting %s at %s with mountOptions: %v volumeID(%s)", source, target, mountOptions, volumeID)
	if err := d.mounter.Mount(source, target, "", mountOptions); err != nil {
		if removeErr := os.Remove(target); removeErr != nil {
			return nil, status.Errorf(codes.Internal, "Could not remove mount target %q: %v", target, removeErr)
		}
		return nil, status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err)
	}
	klog.V(2).Infof("NodePublishVolume: mount %s at %s volumeID(%s) successfully", source, target, volumeID)
	return &csi.NodePublishVolumeResponse{}, nil
}

// NodeUnpublishVolume unmounts the volume from the target path.
func (d *Driver) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
	volumeID := req.GetVolumeId()
	if len(volumeID) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
	}
	targetPath := req.GetTargetPath()
	if len(targetPath) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
	}
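
	// Unpublish only tears down the bind mount created in NodePublishVolume,
	// so no SMB credentials are needed here; the staged share mount is removed
	// later by NodeUnstageVolume.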
	klog.V(2).Infof("NodeUnpublishVolume: unmounting volume %s on %s", volumeID, targetPath)
	err := CleanupSMBMountPoint(d.mounter, targetPath, true /*extensiveMountPointCheck*/)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "failed to unmount target %q: %v", targetPath, err)
	}
	klog.V(2).Infof("NodeUnpublishVolume: unmount volume %s on %s successfully", volumeID, targetPath)
	return &csi.NodeUnpublishVolumeResponse{}, nil
}

// NodeStageVolume mounts the volume to a staging path.
func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
	volumeID := req.GetVolumeId()
	if len(volumeID) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
	}

	volumeCapability := req.GetVolumeCapability()
	if volumeCapability == nil {
		return nil, status.Error(codes.InvalidArgument, "Volume capability not provided")
	}

	targetPath := req.GetStagingTargetPath()
	if len(targetPath) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Staging target not provided")
	}

	context := req.GetVolumeContext()
	mountFlags := req.GetVolumeCapability().GetMount().GetMountFlags()
	volumeMountGroup := req.GetVolumeCapability().GetMount().GetVolumeMountGroup()
	secrets := req.GetSecrets()
	gidPresent, err := checkGidPresentInMountFlags(volumeMountGroup, mountFlags)
	if err != nil {
		return nil, err
	}
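
	// Illustrative volume context (example values, not from this request):
	//   {"source": "//smb-server.default.svc/share", "subDir": "dir1"}
	// source is the UNC path of the SMB share; subDir, when set, is appended
	// to it before mounting.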
	var source, subDir string
	for k, v := range context {
		switch strings.ToLower(k) {
		case sourceField:
			source = v
		case subDirField:
			subDir = v
		}
	}

	if source == "" {
		return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("%s field is missing, current context: %v", sourceField, context))
	}

	if acquired := d.volumeLocks.TryAcquire(volumeID); !acquired {
		return nil, status.Errorf(codes.Aborted, volumeOperationAlreadyExistsFmt, volumeID)
	}
	defer d.volumeLocks.Release(volumeID)
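
	// Stage secrets carry the share credentials; keys are matched
	// case-insensitively, e.g. (illustrative values)
	// {"username": "user1", "password": "...", "domain": "CORP"}.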
	var username, password, domain string
	for k, v := range secrets {
		switch strings.ToLower(k) {
		case usernameField:
			username = strings.TrimSpace(v)
		case passwordField:
			password = strings.TrimSpace(v)
		case domainField:
			domain = strings.TrimSpace(v)
		}
	}

	var mountOptions, sensitiveMountOptions []string
	if runtime.GOOS == "windows" {
		if domain == "" {
			domain = defaultDomainName
		}
		if !strings.Contains(username, "\\") {
			username = fmt.Sprintf("%s\\%s", domain, username)
		}
		mountOptions = []string{username}
		sensitiveMountOptions = []string{password}
	} else {
		if err := os.MkdirAll(targetPath, 0750); err != nil {
			return nil, status.Error(codes.Internal, fmt.Sprintf("MkdirAll %s failed with error: %v", targetPath, err))
		}
		sensitiveMountOptions = []string{fmt.Sprintf("%s=%s,%s=%s", usernameField, username, passwordField, password)}
		mountOptions = mountFlags
		if !gidPresent && volumeMountGroup != "" {
			mountOptions = append(mountOptions, fmt.Sprintf("gid=%s", volumeMountGroup))
		}
		if domain != "" {
			mountOptions = append(mountOptions, fmt.Sprintf("%s=%s", domainField, domain))
		}
	}
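
	// On Linux the assembled options roughly correspond to (illustrative):
	//   mount -t cifs //server/share <staging-path> \
	//     -o <mountFlags>,gid=<fsGroup>,domain=<domain>,username=...,password=...
	// with the credentials kept in sensitiveMountOptions so they stay out of
	// logs; on Windows the domain-qualified username and the password are
	// passed to the mounter directly instead.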

	klog.V(2).Infof("NodeStageVolume: targetPath(%v) volumeID(%v) context(%v) mountflags(%v) mountOptions(%v)",
		targetPath, volumeID, context, mountFlags, mountOptions)

	isDirMounted, err := d.ensureMountPoint(targetPath)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not mount target %s: %v", targetPath, err)
	}
	if isDirMounted {
		klog.V(2).Infof("NodeStageVolume: already mounted volume %s on target %s", volumeID, targetPath)
	} else {
		if err = prepareStagePath(targetPath, d.mounter); err != nil {
			return nil, fmt.Errorf("prepare stage path failed for %s with error: %v", targetPath, err)
		}
		if subDir != "" {
			source = strings.TrimRight(source, "/")
			source = fmt.Sprintf("%s/%s", source, subDir)
		}
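
		// The mount is attempted exactly once: the poll condition below always
		// returns true, so wait.PollImmediate serves as a time budget around a
		// single attempt rather than a retry loop, and mountComplete records
		// whether that attempt actually ran.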
		mountComplete := false
		err = wait.PollImmediate(1*time.Second, 2*time.Minute, func() (bool, error) {
			err := Mount(d.mounter, source, targetPath, "cifs", mountOptions, sensitiveMountOptions)
			mountComplete = true
			return true, err
		})
		if !mountComplete {
			return nil, status.Error(codes.Internal, fmt.Sprintf("volume(%s) mount %q on %q failed with timeout(2m)", volumeID, source, targetPath))
		}
		if err != nil {
			return nil, status.Error(codes.Internal, fmt.Sprintf("volume(%s) mount %q on %q failed with %v", volumeID, source, targetPath, err))
		}
		klog.V(2).Infof("volume(%s) mount %q on %q succeeded", volumeID, source, targetPath)
	}

	return &csi.NodeStageVolumeResponse{}, nil
}

// NodeUnstageVolume unmounts the volume from the staging path.
func (d *Driver) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
	volumeID := req.GetVolumeId()
	if len(volumeID) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
	}
	stagingTargetPath := req.GetStagingTargetPath()
	if len(stagingTargetPath) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Staging target not provided")
	}

	if acquired := d.volumeLocks.TryAcquire(volumeID); !acquired {
		return nil, status.Errorf(codes.Aborted, volumeOperationAlreadyExistsFmt, volumeID)
	}
	defer d.volumeLocks.Release(volumeID)

	klog.V(2).Infof("NodeUnstageVolume: CleanupMountPoint on %s with volume %s", stagingTargetPath, volumeID)
	if err := CleanupSMBMountPoint(d.mounter, stagingTargetPath, true /*extensiveMountPointCheck*/); err != nil {
		return nil, status.Errorf(codes.Internal, "failed to unmount staging target %q: %v", stagingTargetPath, err)
	}

	klog.V(2).Infof("NodeUnstageVolume: unmount volume %s on %s successfully", volumeID, stagingTargetPath)
	return &csi.NodeUnstageVolumeResponse{}, nil
}

// NodeGetCapabilities returns the capabilities of the Node plugin.
func (d *Driver) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
	return &csi.NodeGetCapabilitiesResponse{
		Capabilities: d.NSCap,
	}, nil
}

// NodeGetInfo returns info of the node on which this plugin is running.
func (d *Driver) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
	return &csi.NodeGetInfoResponse{
		NodeId: d.NodeID,
	}, nil
}

// NodeGetVolumeStats returns usage statistics (bytes and inodes) for the
// volume mounted at the given path.
func (d *Driver) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
	if len(req.VolumeId) == 0 {
		return nil, status.Error(codes.InvalidArgument, "NodeGetVolumeStats volume ID was empty")
	}
	if len(req.VolumePath) == 0 {
		return nil, status.Error(codes.InvalidArgument, "NodeGetVolumeStats volume path was empty")
	}

	if _, err := os.Lstat(req.VolumePath); err != nil {
		if os.IsNotExist(err) {
			return nil, status.Errorf(codes.NotFound, "path %s does not exist", req.VolumePath)
		}
		return nil, status.Errorf(codes.Internal, "failed to stat file %s: %v", req.VolumePath, err)
	}

	volumeMetrics, err := volume.NewMetricsStatFS(req.VolumePath).GetMetrics()
	if err != nil {
		return nil, status.Errorf(codes.Internal, "failed to get metrics: %v", err)
	}
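
	// MetricsStatFS derives the figures below from a statfs(2)-style query on
	// the mounted path (on Linux), so they reflect what the SMB server reports
	// for the share. The metrics come back as resource.Quantity values, hence
	// the AsInt64 conversions that follow.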
	available, ok := volumeMetrics.Available.AsInt64()
	if !ok {
		return nil, status.Errorf(codes.Internal, "failed to transform volume available size(%v)", volumeMetrics.Available)
	}
	capacity, ok := volumeMetrics.Capacity.AsInt64()
	if !ok {
		return nil, status.Errorf(codes.Internal, "failed to transform volume capacity size(%v)", volumeMetrics.Capacity)
	}
	used, ok := volumeMetrics.Used.AsInt64()
	if !ok {
		return nil, status.Errorf(codes.Internal, "failed to transform volume used size(%v)", volumeMetrics.Used)
	}

	inodesFree, ok := volumeMetrics.InodesFree.AsInt64()
	if !ok {
		return nil, status.Errorf(codes.Internal, "failed to transform disk inodes free(%v)", volumeMetrics.InodesFree)
	}
	inodes, ok := volumeMetrics.Inodes.AsInt64()
	if !ok {
		return nil, status.Errorf(codes.Internal, "failed to transform disk inodes(%v)", volumeMetrics.Inodes)
	}
	inodesUsed, ok := volumeMetrics.InodesUsed.AsInt64()
	if !ok {
		return nil, status.Errorf(codes.Internal, "failed to transform disk inodes used(%v)", volumeMetrics.InodesUsed)
	}

	return &csi.NodeGetVolumeStatsResponse{
		Usage: []*csi.VolumeUsage{
			{
				Unit:      csi.VolumeUsage_BYTES,
				Available: available,
				Total:     capacity,
				Used:      used,
			},
			{
				Unit:      csi.VolumeUsage_INODES,
				Available: inodesFree,
				Total:     inodes,
				Used:      inodesUsed,
			},
		},
	}, nil
}

// NodeExpandVolume expands the volume; not applicable for SMB, so it is
// left unimplemented.
func (d *Driver) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}

// ensureMountPoint creates the mount point if it does not exist.
// It returns <true, nil> if the target is already a valid mount point,
// otherwise <false, nil>.
func (d *Driver) ensureMountPoint(target string) (bool, error) {
	notMnt, err := d.mounter.IsLikelyNotMountPoint(target)
	if err != nil && !os.IsNotExist(err) {
		if IsCorruptedDir(target) {
			notMnt = false
			klog.Warningf("detected corrupted mount for targetPath [%s]", target)
		} else {
			return !notMnt, err
		}
	}
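
	// A stale SMB mount (for example, one whose server has gone away)
	// typically still looks like a mount point but fails directory reads;
	// the ReadDir probe below detects that case so the path can be unmounted
	// and mounted afresh by the caller.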
	if !notMnt {
		// testing original mount point, make sure the mount link is valid
		_, err := ioutil.ReadDir(target)
		if err == nil {
			klog.V(2).Infof("already mounted to target %s", target)
			return !notMnt, nil
		}
		// mount link is invalid, now unmount and remount later
		klog.Warningf("ReadDir %s failed with %v, unmount this directory", target, err)
		if err := d.mounter.Unmount(target); err != nil {
			klog.Errorf("Unmount directory %s failed with %v", target, err)
			return !notMnt, err
		}
		notMnt = true
		return !notMnt, err
	}

	if err := makeDir(target); err != nil {
		klog.Errorf("MakeDir failed on target: %s (%v)", target, err)
		return !notMnt, err
	}

	return false, nil
}

// makeDir creates the directory (and any missing parents) if it does not
// already exist.
func makeDir(pathname string) error {
	err := os.MkdirAll(pathname, os.FileMode(0755))
	if err != nil {
		if !os.IsExist(err) {
			return err
		}
	}
	return nil
}
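
// checkGidPresentInMountFlags reports whether mountFlags already carry a gid
// option. For example (illustrative values), mountFlags of
// ["dir_mode=0777", "gid=1001"] with volumeMountGroup "1001" returns
// <true, nil>, while a conflicting volumeMountGroup such as "2000" yields an
// InvalidArgument error.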
func checkGidPresentInMountFlags(volumeMountGroup string, mountFlags []string) (bool, error) {
	gidPresentInMountFlags := false
	for _, mountFlag := range mountFlags {
		if strings.HasPrefix(mountFlag, "gid") {
			gidPresentInMountFlags = true
			kvpair := strings.Split(mountFlag, "=")
			if volumeMountGroup != "" && len(kvpair) == 2 && !strings.EqualFold(volumeMountGroup, kvpair[1]) {
				return false, status.Error(codes.InvalidArgument, fmt.Sprintf("gid(%s) in storageClass and pod fsgroup(%s) are not equal", kvpair[1], volumeMountGroup))
			}
		}
	}
	return gidPresentInMountFlags, nil
}