From bb8a24a853895fb172475db1392379b234e49e6f Mon Sep 17 00:00:00 2001
From: Serge Logvinov
Date: Fri, 9 Aug 2024 12:19:11 +0300
Subject: [PATCH] [cinder-csi-plugin] ephemeral volume removal (#2602)

Remove OpenStack credentials from the node plugin

Signed-off-by: Serge Logvinov
---
 charts/cinder-csi-plugin/Chart.yaml             |   2 +-
 .../controllerplugin-deployment.yaml            |   1 +
 .../templates/nodeplugin-daemonset.yaml         |   7 +-
 cmd/cinder-csi-plugin/main.go                   |  81 +++++++++----
 docs/cinder-csi-plugin/features.md              |  10 +-
 .../using-cinder-csi-plugin.md                  |   4 +-
 pkg/csi/cinder/controllerserver.go              |  13 +++
 pkg/csi/cinder/driver.go                        |  15 ++-
 pkg/csi/cinder/nodeserver.go                    | 106 +++---------------
 pkg/csi/cinder/nodeserver_test.go               |  52 ++-------
 pkg/csi/cinder/utils.go                         |  13 ++-
 .../blockdevice/blockdevice_unsupported.go      |   4 +
 tests/sanity/cinder/sanity_test.go              |   2 +-
 13 files changed, 138 insertions(+), 172 deletions(-)

diff --git a/charts/cinder-csi-plugin/Chart.yaml b/charts/cinder-csi-plugin/Chart.yaml
index 980f10fb0e..7790136f14 100644
--- a/charts/cinder-csi-plugin/Chart.yaml
+++ b/charts/cinder-csi-plugin/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v1
 appVersion: v1.31.0
 description: Cinder CSI Chart for OpenStack
 name: openstack-cinder-csi
-version: 2.31.0
+version: 2.31.0-alpha.1
 home: https://github.com/kubernetes/cloud-provider-openstack
 icon: https://github.com/kubernetes/kubernetes/blob/master/logo/logo.png
 maintainers:
diff --git a/charts/cinder-csi-plugin/templates/controllerplugin-deployment.yaml b/charts/cinder-csi-plugin/templates/controllerplugin-deployment.yaml
index 542c179f5a..03cc4f1b2e 100644
--- a/charts/cinder-csi-plugin/templates/controllerplugin-deployment.yaml
+++ b/charts/cinder-csi-plugin/templates/controllerplugin-deployment.yaml
@@ -173,6 +173,7 @@ spec:
             - "--endpoint=$(CSI_ENDPOINT)"
             - "--cloud-config=$(CLOUD_CONFIG)"
             - "--cluster=$(CLUSTER_NAME)"
+            - "--provide-node-service=false"
 {{- if .Values.csi.plugin.httpEndpoint.enabled }}
             - "--http-endpoint=:{{ .Values.csi.plugin.httpEndpoint.port }}"
 {{- end }}
diff --git a/charts/cinder-csi-plugin/templates/nodeplugin-daemonset.yaml b/charts/cinder-csi-plugin/templates/nodeplugin-daemonset.yaml
index e55e8d69ea..4fef26eba9 100644
--- a/charts/cinder-csi-plugin/templates/nodeplugin-daemonset.yaml
+++ b/charts/cinder-csi-plugin/templates/nodeplugin-daemonset.yaml
@@ -91,7 +92,8 @@ spec:
             - /bin/cinder-csi-plugin
             - "-v={{ .Values.logVerbosityLevel }}"
             - "--endpoint=$(CSI_ENDPOINT)"
-            - "--cloud-config=$(CLOUD_CONFIG)"
+            - "--provide-controller-service=false"
+            # - "--cloud-config=$(CLOUD_CONFIG)"
 {{- if .Values.csi.plugin.extraArgs }}
 {{- with .Values.csi.plugin.extraArgs }}
 {{- tpl . $ | trim | nindent 12 }}
@@ -100,8 +101,8 @@ spec:
           env:
             - name: CSI_ENDPOINT
               value: unix://csi/csi.sock
-            - name: CLOUD_CONFIG
-              value: /etc/kubernetes/{{ .Values.secret.filename }}
+            # - name: CLOUD_CONFIG
+            #   value: /etc/kubernetes/{{ .Values.secret.filename }}
 {{- if .Values.csi.plugin.extraEnv }}
 {{- toYaml .Values.csi.plugin.extraEnv | nindent 12 }}
 {{- end }}
diff --git a/cmd/cinder-csi-plugin/main.go b/cmd/cinder-csi-plugin/main.go
index 5bb50621ca..d9dbc6adbc 100644
--- a/cmd/cinder-csi-plugin/main.go
+++ b/cmd/cinder-csi-plugin/main.go
@@ -31,13 +31,18 @@ import (
 )
 
 var (
-	endpoint                 string
-	nodeID                   string
-	cloudConfig              []string
-	cloudNames               []string
-	additionalTopologies     map[string]string
-	cluster                  string
-	httpEndpoint             string
+	endpoint              string
+	nodeID                string
+	cloudConfig           []string
+	cloudNames            []string
+	additionalTopologies  map[string]string
+	cluster               string
+	httpEndpoint          string
+
+	searchOrder           string
+	rescanOnResize        bool
+	nodeVolumeAttachLimit int64
+
 	provideControllerService bool
 	provideNodeService       bool
 )
@@ -49,6 +54,17 @@ func main() {
 		Run: func(cmd *cobra.Command, args []string) {
 			handle()
 		},
+		PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
+			f := cmd.Flags()
+
+			if provideControllerService {
+				if configs, err := f.GetStringSlice("cloud-config"); err != nil || len(configs) == 0 {
+					klog.Fatalf("The cloud-config option is required when the controller service is provided")
+				}
+			}
+
+			return nil
+		},
 		Version: version.Version,
 	}
@@ -62,10 +78,7 @@ func main() {
 		klog.Fatalf("Unable to mark flag endpoint to be required: %v", err)
 	}
 
-	cmd.PersistentFlags().StringSliceVar(&cloudConfig, "cloud-config", nil, "CSI driver cloud config. This option can be given multiple times")
-	if err := cmd.MarkPersistentFlagRequired("cloud-config"); err != nil {
-		klog.Fatalf("Unable to mark flag cloud-config to be required: %v", err)
-	}
+	cmd.Flags().StringSliceVar(&cloudConfig, "cloud-config", nil, "CSI driver cloud config. This option can be given multiple times")
 
 	cmd.PersistentFlags().StringSliceVar(&cloudNames, "cloud-name", []string{""}, "Cloud name to instruct CSI driver to read additional OpenStack cloud credentials from the configuration subsections. This option can be specified multiple times to manage multiple OpenStack clouds.")
 	cmd.PersistentFlags().StringToStringVar(&additionalTopologies, "additional-topology", map[string]string{}, "Additional CSI driver topology keys, for example topology.kubernetes.io/region=REGION1. This option can be specified multiple times to add multiple additional topology keys.")
@@ -76,6 +89,12 @@ func main() {
 	cmd.PersistentFlags().BoolVar(&provideControllerService, "provide-controller-service", true, "If set to true then the CSI driver does provide the controller service (default: true)")
 	cmd.PersistentFlags().BoolVar(&provideNodeService, "provide-node-service", true, "If set to true then the CSI driver does provide the node service (default: true)")
 
+	cmd.PersistentFlags().StringVar(&searchOrder, "search-order", "configDrive,metadataService", "The search order for metadata service")
+
+	// Node specific flags
+	cmd.PersistentFlags().BoolVar(&rescanOnResize, "rescan-on-resize", false, "If set to true then the CSI driver will rescan the device on volume resize")
+	cmd.PersistentFlags().Int64Var(&nodeVolumeAttachLimit, "node-volume-attach-limit", 256, "The maximum number of volumes that can be attached to a node")
+
 	openstack.AddExtraFlags(pflag.CommandLine)
 
 	code := cli.Run(cmd)
@@ -87,17 +106,19 @@ func handle() {
 	d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster})
 
 	openstack.InitOpenStackProvider(cloudConfig, httpEndpoint)
 
-	var err error
-	clouds := make(map[string]openstack.IOpenStack)
-	for _, cloudName := range cloudNames {
-		clouds[cloudName], err = openstack.GetOpenStackProvider(cloudName)
-		if err != nil {
-			klog.Warningf("Failed to GetOpenStackProvider %s: %v", cloudName, err)
-			return
-		}
-	}
 
 	if provideControllerService {
+		clouds := make(map[string]openstack.IOpenStack, len(cloudNames))
+		for _, cloudName := range cloudNames {
+			var err error
+
+			clouds[cloudName], err = openstack.GetOpenStackProvider(cloudName)
+			if err != nil {
+				klog.Warningf("Failed to GetOpenStackProvider %s: %v", cloudName, err)
+				return
+			}
+		}
+
 		d.SetupControllerService(clouds)
 	}
@@ -105,10 +126,26 @@ func handle() {
 	if provideNodeService {
 		//Initialize mount
 		mount := mount.GetMountProvider()
 
+		// Backward compatibility: read [BlockStorage] parameters from the cloud config
+		cfg, err := openstack.GetConfigFromFiles(cloudConfig)
+		if err == nil {
+			if cfg.Metadata.SearchOrder != "" {
+				searchOrder = cfg.Metadata.SearchOrder
+			}
+
+			if cfg.BlockStorage.RescanOnResize {
+				rescanOnResize = cfg.BlockStorage.RescanOnResize
+			}
+
+			if cfg.BlockStorage.NodeVolumeAttachLimit > 0 && cfg.BlockStorage.NodeVolumeAttachLimit < nodeVolumeAttachLimit {
+				nodeVolumeAttachLimit = cfg.BlockStorage.NodeVolumeAttachLimit
+			}
+		}
+
 		//Initialize Metadata
-		metadata := metadata.GetMetadataProvider(clouds[cloudNames[0]].GetMetadataOpts().SearchOrder)
+		metadata := metadata.GetMetadataProvider(searchOrder)
 
-		d.SetupNodeService(clouds[cloudNames[0]], mount, metadata, additionalTopologies)
+		d.SetupNodeService(mount, metadata, rescanOnResize, nodeVolumeAttachLimit, additionalTopologies)
 	}
 
 	d.Run()
diff --git a/docs/cinder-csi-plugin/features.md b/docs/cinder-csi-plugin/features.md
index b033ff791f..578d9f1a49 100644
--- a/docs/cinder-csi-plugin/features.md
+++ b/docs/cinder-csi-plugin/features.md
@@ -24,7 +24,7 @@ Dynamic Provisioning uses persistence volume claim (PVC) to request the Kubernetes to create the Cinder volume on behalf of user and consumes the volume from inside container.
 
-For usage, refer [sample app](./examples.md#dynamic-volume-provisioning) 
+For usage, refer to the [sample app](./examples.md#dynamic-volume-provisioning)
 
 ## Topology
 
@@ -35,7 +35,7 @@ This feature enables driver to consider the topology constraints while creating
 `topology.cinder.csi.openstack.org/zone` : Availability by Zone
 * `allowedTopologies` can be specified in storage class to restrict the topology of provisioned volumes to specific zones and should be used as replacement of `availability` parameter.
 * To disable: set `--feature-gates=Topology=false` in external-provisioner (container `csi-provisioner` of `csi-cinder-controllerplugin`).
-  * If using Helm, it can be disabled by setting `Values.csi.provisioner.topology: "false"` 
+  * If using Helm, it can be disabled by setting `Values.csi.provisioner.topology: "false"`
 
 For usage, refer [sample app](./examples.md#use-topology)
 
@@ -51,7 +51,7 @@ For usage, refer [sample app](./examples.md#using-block-volume)
 
 ## Volume Expansion
 
-Driver supports both `Offline` and `Online` resize of cinder volumes. Cinder online resize support is available since cinder 3.42 microversion. 
+The driver supports both `Offline` and `Online` resize of Cinder volumes. Online resize is supported since Cinder API microversion 3.42.
 The same should be supported by underlying OpenStack Cloud to avail the feature.
 
 * As of kubernetes v1.16, Volume Expansion is a beta feature and enabled by default.
@@ -60,7 +60,7 @@ The same should be supported by underlying OpenStack Cloud to avail the feature.
 
 ### Rescan on in-use volume resize
 
-Some hypervizors (like VMware) don't automatically send a new volume size to a Linux kernel, when a volume is in-use. Sending a "1" to `/sys/class/block/XXX/device/rescan` is telling the SCSI block device to refresh it's information about where it's ending boundary is (among other things) to give the kernel information about it's updated size. When a `rescan-on-resize` flag is set in a CSI node driver cloud-config `[BlockStorage]` section, a CSI node driver will rescan block device and verify its size before expanding the filesystem. CSI driver will raise an error, when expected volume size cannot be detected.
+Some hypervisors (like VMware) don't automatically report a new volume size to the Linux kernel while a volume is in use. Writing "1" to `/sys/class/block/XXX/device/rescan` tells the SCSI block device to refresh its information about its ending boundary (among other things), giving the kernel the updated size. When the `rescan-on-resize` flag is set in the CSI node driver cloud-config `[BlockStorage]` section or through the CLI parameter `--rescan-on-resize`, the node driver rescans the block device and verifies its size before expanding the filesystem. The CSI driver raises an error when the expected volume size cannot be detected.
 
 Not all hypervizors have a `/sys/class/block/XXX/device/rescan` location, therefore if you enable this option and your hypervizor doesn't support this, you'll get a warning log on resize event. It is recommended to disable this option in this case.
 
@@ -81,7 +81,7 @@ Two different Kubernetes features allow volumes to follow the Pod's lifecycle: CSI ephemeral volumes and generic ephemeral volumes.
 
 This feature allows CSI volumes to be directly embedded in the Pod specification instead of a PersistentVolume. Volumes specified in this way are ephemeral and do not persist across Pod restarts.
 
-* As of Kubernetes v1.16 this feature is beta so enabled by default. 
+* As of Kubernetes v1.16 this feature is beta and therefore enabled by default.
 * To enable this feature for CSI Driver, `volumeLifecycleModes` needs to be specified in [CSIDriver](../../manifests/cinder-csi-plugin/csi-cinder-driver.yaml) object. The driver can run in `Persistent` mode, `Ephemeral` or in both modes.
 * `podInfoOnMount` must be `true` to use this feature.
 * For usage, refer [sample app](./examples.md#deploy-app-using-inline-volumes)
diff --git a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md
index 210d26a2c4..33bde5c6f7 100644
--- a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md
+++ b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md
@@ -131,7 +131,7 @@ The following sections are supported in configuration file.
 For Cinder CSI Plugin to authenticate with OpenStack Keystone, required parameters needs to be passed in `[Global]` section of the file. For all supported parameters, please refer [Global](../openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md#global) section.
 
 ### Block Storage
-These configuration options pertain to block storage and should appear in the `[BlockStorage]` section of the `$CLOUD_CONFIG` file. 
+These configuration options pertain to block storage and should appear in the `[BlockStorage]` section of the `$CLOUD_CONFIG` file. Some parameters can be passed as command line arguments as well.
 
 * `node-volume-attach-limit`
   Optional. To configure maximum volumes that can be attached to the node. Its default value is `256`.
@@ -270,7 +270,7 @@ helm install --namespace kube-system --name cinder-csi ./charts/cinder-csi-plugin
 | VolumeSnapshotClass `parameters` | `type` | Empty String | `snapshot` creates a VolumeSnapshot object linked to a Cinder volume snapshot. `backup` creates a VolumeSnapshot object linked to a cinder volume backup. Defaults to `snapshot` if not defined |
 | VolumeSnapshotClass `parameters` | `backup-max-duration-seconds-per-gb` | `20` | Defines the amount of time to wait for a backup to complete in seconds per GB of volume size |
 | VolumeSnapshotClass `parameters` | `availability` | Same as volume | String. Backup Availability Zone |
-| Inline Volume `volumeAttributes` | `capacity` | `1Gi` | volume size for creating inline volumes| 
+| Inline Volume `volumeAttributes` | `capacity` | `1Gi` | volume size for creating inline volumes |
 | Inline Volume `VolumeAttributes` | `type` | Empty String | Name/ID of Volume type. Corresponding volume type should exist in cinder |
 
 ## Local Development
diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go
index a2d42eb21a..0a18c04bcb 100644
--- a/pkg/csi/cinder/controllerserver.go
+++ b/pkg/csi/cinder/controllerserver.go
@@ -1061,8 +1061,11 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
 
 func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibleTopologyReq *csi.TopologyRequirement) *csi.CreateVolumeResponse {
 	var volsrc *csi.VolumeContentSource
+	resizeRequired := false
 
 	if vol.SnapshotID != "" {
+		resizeRequired = true
+
 		volsrc = &csi.VolumeContentSource{
 			Type: &csi.VolumeContentSource_Snapshot{
 				Snapshot: &csi.VolumeContentSource_SnapshotSource{
@@ -1073,6 +1076,8 @@ func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibleTopologyReq *csi.TopologyRequirement) *csi.CreateVolumeResponse {
 	}
 
 	if vol.SourceVolID != "" {
+		resizeRequired = true
+
 		volsrc = &csi.VolumeContentSource{
 			Type: &csi.VolumeContentSource_Volume{
 				Volume: &csi.VolumeContentSource_VolumeSource{
@@ -1083,6 +1088,8 @@ func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibleTopologyReq *csi.TopologyRequirement) *csi.CreateVolumeResponse {
 	}
 
 	if vol.BackupID != nil && *vol.BackupID != "" {
+		resizeRequired = true
+
 		volsrc = &csi.VolumeContentSource{
 			Type: &csi.VolumeContentSource_Snapshot{
 				Snapshot: &csi.VolumeContentSource_SnapshotSource{
@@ -1107,12 +1114,18 @@ func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibleTopologyReq *csi.TopologyRequirement) *csi.CreateVolumeResponse {
 		}
 	}
 
+	volCnx := map[string]string{}
+	if resizeRequired {
+		volCnx[ResizeRequired] = "true"
+	}
+
 	resp := &csi.CreateVolumeResponse{
 		Volume: &csi.Volume{
 			VolumeId:           vol.ID,
 			CapacityBytes:      int64(vol.Size * 1024 * 1024 * 1024),
 			AccessibleTopology: accessibleTopology,
 			ContentSource:      volsrc,
+			VolumeContext:      volCnx,
 		},
 	}
diff --git a/pkg/csi/cinder/driver.go b/pkg/csi/cinder/driver.go
index 773b98a8cc..c7f0d6fe74 100644
--- a/pkg/csi/cinder/driver.go
+++ b/pkg/csi/cinder/driver.go
@@ -32,6 +32,12 @@ import (
 
 const (
 	driverName  = "cinder.csi.openstack.org"
 	topologyKey = "topology." + driverName + "/zone"
+
+	// MaxVolumesPerNode is the maximum number of volumes that can be attached to a node
+	MaxVolumesPerNode = 256
+
+	// ResizeRequired parameter, if set to true, will trigger a resize on mount operation
+	ResizeRequired = "resizeRequired"
 )
 
 var (
@@ -177,9 +183,14 @@ func (d *Driver) SetupControllerService(clouds map[string]openstack.IOpenStack) {
 	d.cs = NewControllerServer(d, clouds)
 }
 
-func (d *Driver) SetupNodeService(cloud openstack.IOpenStack, mount mount.IMount, metadata metadata.IMetadata, topologies map[string]string) {
+func (d *Driver) SetupNodeService(mount mount.IMount, metadata metadata.IMetadata, rescanOnResize bool, nodeVolumeAttachLimit int64, topologies map[string]string) {
 	klog.Info("Providing node service")
-	d.ns = NewNodeServer(d, mount, metadata, cloud, topologies)
+
+	if nodeVolumeAttachLimit < 0 || nodeVolumeAttachLimit > MaxVolumesPerNode {
+		nodeVolumeAttachLimit = MaxVolumesPerNode
+	}
+
+	d.ns = NewNodeServer(d, mount, metadata, nodeVolumeAttachLimit, rescanOnResize, topologies)
 }
 
 func (d *Driver) Run() {
diff --git a/pkg/csi/cinder/nodeserver.go b/pkg/csi/cinder/nodeserver.go
index bfa966c741..cd178d182c 100644
--- a/pkg/csi/cinder/nodeserver.go
+++ b/pkg/csi/cinder/nodeserver.go
@@ -24,16 +24,13 @@ import (
 	"strings"
 
 	"github.com/container-storage-interface/spec/lib/go/csi"
-	"github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes"
 	"github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	"k8s.io/klog/v2"
 	utilpath "k8s.io/utils/path"
 
-	"k8s.io/cloud-provider-openstack/pkg/csi/cinder/openstack"
 	"k8s.io/cloud-provider-openstack/pkg/util/blockdevice"
-	cpoerrors "k8s.io/cloud-provider-openstack/pkg/util/errors"
 	"k8s.io/cloud-provider-openstack/pkg/util/metadata"
 	"k8s.io/cloud-provider-openstack/pkg/util/mount"
 	mountutil "k8s.io/mount-utils"
@@ -43,8 +40,10 @@ type nodeServer struct {
 	Driver     *Driver
 	Mount      mount.IMount
 	Metadata   metadata.IMetadata
-	Cloud      openstack.IOpenStack
 	Topologies map[string]string
+
+	maxVolumeAttachLimit int64
+	rescanOnResize       bool
 }
 
 func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
@@ -163,73 +162,10 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
 		return nil, status.Error(codes.InvalidArgument, "[NodeUnpublishVolume] volumeID must be provided")
 	}
 
-	ephemeralVolume := false
-
-	vol, err := ns.Cloud.GetVolume(volumeID)
-	if err != nil {
-
-		if !cpoerrors.IsNotFound(err) {
-			return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err)
-		}
-
-		// if not found by id, try to search by name
-		volName := fmt.Sprintf("ephemeral-%s", volumeID)
-
-		vols, err := ns.Cloud.GetVolumesByName(volName)
-
-		//if volume not found then GetVolumesByName returns empty list
-		if err != nil {
-			return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err)
-		}
-		if len(vols) > 0 {
-			vol = &vols[0]
-			ephemeralVolume = true
-		} else {
-			return nil, status.Errorf(codes.NotFound, "Volume not found %s", volName)
-		}
-	}
-
-	err = ns.Mount.UnmountPath(targetPath)
-	if err != nil {
+	if err := ns.Mount.UnmountPath(targetPath); err != nil {
 		return nil, status.Errorf(codes.Internal, "Unmount of targetpath %s failed with error %v", targetPath, err)
 	}
 
-	if ephemeralVolume {
-		return nodeUnpublishEphemeral(req, ns, vol)
-	}
-
 	return &csi.NodeUnpublishVolumeResponse{}, nil
-
 }
 
-func nodeUnpublishEphemeral(req *csi.NodeUnpublishVolumeRequest, ns *nodeServer, vol *volumes.Volume) (*csi.NodeUnpublishVolumeResponse, error) {
-	volumeID := vol.ID
-	var instanceID string
-
-	if len(vol.Attachments) > 0 {
-		instanceID = vol.Attachments[0].ServerID
-	} else {
-		return nil, status.Error(codes.FailedPrecondition, "Volume attachment not found in request")
-	}
-
-	err := ns.Cloud.DetachVolume(instanceID, volumeID)
-	if err != nil {
-		klog.V(3).Infof("Failed to DetachVolume: %v", err)
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
-	err = ns.Cloud.WaitDiskDetached(instanceID, volumeID)
-	if err != nil {
-		klog.V(3).Infof("Failed to WaitDiskDetached: %v", err)
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
-	err = ns.Cloud.DeleteVolume(volumeID)
-	if err != nil {
-		klog.V(3).Infof("Failed to DeleteVolume: %v", err)
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
-	return &csi.NodeUnpublishVolumeResponse{}, nil
-}
-
@@ -238,6 +174,7 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
 	stagingTarget := req.GetStagingTargetPath()
 	volumeCapability := req.GetVolumeCapability()
+	volumeContext := req.GetVolumeContext()
 	volumeID := req.GetVolumeId()
 
 	if len(volumeID) == 0 {
@@ -251,14 +188,6 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
 		return nil, status.Error(codes.InvalidArgument, "NodeStageVolume Volume Capability must be provided")
 	}
 
-	vol, err := ns.Cloud.GetVolume(volumeID)
-	if err != nil {
-		if cpoerrors.IsNotFound(err) {
-			return nil, status.Error(codes.NotFound, "Volume not found")
-		}
-		return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err)
-	}
-
 	m := ns.Mount
 	// Do not trust the path provided by cinder, get the real path on node
 	devicePath, err := getDevicePath(volumeID, m)
@@ -296,9 +225,7 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
 		}
 	}
 
-	// Try expanding the volume if it's created from a snapshot or another volume (see #1539)
-	if vol.SourceVolID != "" || vol.SnapshotID != "" {
-
+	if required, ok := volumeContext[ResizeRequired]; ok && strings.EqualFold(required, "true") {
 		r := mountutil.NewResizeFs(ns.Mount.Mounter().Exec)
 
 		needResize, err := r.NeedResize(devicePath, stagingTarget)
@@ -376,12 +303,10 @@ func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
 	}
 
 	topology := &csi.Topology{Segments: topologyMap}
-	maxVolume := ns.Cloud.GetMaxVolLimit()
-
 	return &csi.NodeGetInfoResponse{
 		NodeId:             nodeID,
 		AccessibleTopology: topology,
-		MaxVolumesPerNode:  maxVolume,
+		MaxVolumesPerNode:  ns.maxVolumeAttachLimit,
 	}, nil
 }
@@ -448,13 +373,16 @@ func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
 	if len(volumePath) == 0 {
 		return nil, status.Error(codes.InvalidArgument, "Volume path not provided")
 	}
+	volCapability := req.GetVolumeCapability()
+	if volCapability != nil {
+		if volCapability.GetBlock() != nil {
+			return &csi.NodeExpandVolumeResponse{}, nil
+		}
+	}
 
-	_, err := ns.Cloud.GetVolume(volumeID)
+	_, err := blockdevice.IsBlockDevice(volumePath)
 	if err != nil {
-		if cpoerrors.IsNotFound(err) {
-			return nil, status.Errorf(codes.NotFound, "Volume with ID %s not found", volumeID)
-		}
-		return nil, status.Errorf(codes.Internal, "NodeExpandVolume failed with error %v", err)
+		return nil, status.Errorf(codes.NotFound, "Failed to determine device path for volumePath %s: %v", volumePath, err)
 	}
 
 	output, err := ns.Mount.GetMountFs(volumePath)
@@ -467,13 +395,14 @@ func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
 		return nil, status.Error(codes.Internal, "Unable to find Device path for volume")
 	}
 
-	if ns.Cloud.GetBlockStorageOpts().RescanOnResize {
+	if ns.rescanOnResize {
 		// comparing current volume size with the expected one
 		newSize := req.GetCapacityRange().GetRequiredBytes()
 		if err := blockdevice.RescanBlockDeviceGeometry(devicePath, volumePath, newSize); err != nil {
 			return nil, status.Errorf(codes.Internal, "Could not verify %q volume size: %v", volumeID, err)
 		}
 	}
+
 	r := mountutil.NewResizeFs(ns.Mount.Mounter().Exec)
 	if _, err := r.Resize(devicePath, volumePath); err != nil {
 		return nil, status.Errorf(codes.Internal, "Could not resize volume %q: %v", volumeID, err)
@@ -499,7 +428,6 @@ func getDevicePath(volumeID string, m mount.IMount) (string, error) {
 	}
 
 	return devicePath, nil
-
 }
 
 func collectMountOptions(fsType string, mntFlags []string) []string {
diff --git a/pkg/csi/cinder/nodeserver_test.go b/pkg/csi/cinder/nodeserver_test.go
index f0cbc1b904..e871b6bc45 100644
--- a/pkg/csi/cinder/nodeserver_test.go
+++ b/pkg/csi/cinder/nodeserver_test.go
@@ -54,7 +54,7 @@ func init() {
 			"": omock,
 		}
 
-		fakeNs = NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstances[""], map[string]string{})
+		fakeNs = NewNodeServer(d, mount.MInstance, metadata.MetadataService, MaxVolumesPerNode, false, map[string]string{})
 	}
 }
 
@@ -263,45 +263,6 @@ func TestNodeUnpublishVolume(t *testing.T) {
 	assert.Equal(expectedRes, actualRes)
 }
 
-func TestNodeUnpublishVolumeEphermeral(t *testing.T) {
-	mount.MInstance = mmock
-	metadata.MetadataService = metamock
-	osmock := map[string]openstack.IOpenStack{
-		"": new(openstack.OpenStackMock),
-	}
-	fvolName := fmt.Sprintf("ephemeral-%s", FakeVolID)
-
-	mmock.On("UnmountPath", FakeTargetPath).Return(nil)
-	omock.On("GetVolumesByName", fvolName).Return(FakeVolList, nil)
-	omock.On("DetachVolume", FakeNodeID, FakeVolID).Return(nil)
-	omock.On("WaitDiskDetached", FakeNodeID, FakeVolID).Return(nil)
-	omock.On("DeleteVolume", FakeVolID).Return(nil)
-
-	d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster})
-	fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, osmock[""], map[string]string{})
-
-	// Init assert
-	assert := assert.New(t)
-
-	// Expected Result
-	expectedRes := &csi.NodeUnpublishVolumeResponse{}
-
-	// Fake request
-	fakeReq := &csi.NodeUnpublishVolumeRequest{
-		VolumeId:   FakeVolID,
-		TargetPath: FakeTargetPath,
-	}
-
-	// Invoke NodeUnpublishVolume
-	actualRes, err := fakeNse.NodeUnpublishVolume(FakeCtx, fakeReq)
-	if err != nil {
-		t.Errorf("failed to NodeUnpublishVolume: %v", err)
-	}
-
-	// Assert
-	assert.Equal(expectedRes, actualRes)
-}
-
 // Test NodeUnstageVolume
 func TestNodeUnstageVolume(t *testing.T) {
 
@@ -335,10 +296,19 @@ func TestNodeExpandVolume(t *testing.T) {
 	// Init assert
 	assert := assert.New(t)
 
+	// setup for test
+	// t.TempDir is removed automatically when the test completes
+	tempDir := t.TempDir()
+	volumePath := filepath.Join(tempDir, FakeTargetPath)
+	err := os.MkdirAll(volumePath, 0750)
+	if err != nil {
+		t.Fatalf("Failed to set up volumePath: %v", err)
+	}
+
 	// Fake request
 	fakeReq := &csi.NodeExpandVolumeRequest{
 		VolumeId:   FakeVolName,
-		VolumePath: FakeDevicePath,
+		VolumePath: volumePath,
 	}
 
 	// Expected Result
diff --git a/pkg/csi/cinder/utils.go b/pkg/csi/cinder/utils.go
index 13c1ee993a..f54c630af6 100644
--- a/pkg/csi/cinder/utils.go
+++ b/pkg/csi/cinder/utils.go
@@ -57,13 +57,14 @@ func NewIdentityServer(d *Driver) *identityServer {
 	}
 }
 
-func NewNodeServer(d *Driver, mount mount.IMount, metadata metadata.IMetadata, cloud openstack.IOpenStack, topologies map[string]string) *nodeServer {
+func NewNodeServer(d *Driver, mount mount.IMount, metadata metadata.IMetadata, maxVolumes int64, rescanOnResize bool, topologies map[string]string) *nodeServer {
 	return &nodeServer{
-		Driver:     d,
-		Mount:      mount,
-		Metadata:   metadata,
-		Cloud:      cloud,
-		Topologies: topologies,
+		Driver:               d,
+		Mount:                mount,
+		Metadata:             metadata,
+		Topologies:           topologies,
+		maxVolumeAttachLimit: maxVolumes,
+		rescanOnResize:       rescanOnResize,
 	}
 }
diff --git a/pkg/util/blockdevice/blockdevice_unsupported.go b/pkg/util/blockdevice/blockdevice_unsupported.go
index a75c69be7b..b9adad44d3 100644
--- a/pkg/util/blockdevice/blockdevice_unsupported.go
+++ b/pkg/util/blockdevice/blockdevice_unsupported.go
@@ -34,3 +34,7 @@ func GetBlockDeviceSize(path string) (int64, error) {
 func RescanBlockDeviceGeometry(devicePath string, deviceMountPath string, newSize int64) error {
 	return errors.New("RescanBlockDeviceGeometry is not implemented for this OS")
 }
+
+func RescanDevice(devicePath string) error {
+	return errors.New("RescanDevice is not implemented for this OS")
+}
diff --git a/tests/sanity/cinder/sanity_test.go b/tests/sanity/cinder/sanity_test.go
index 3eb723ddfa..199a9e9fe3 100644
--- a/tests/sanity/cinder/sanity_test.go
+++ b/tests/sanity/cinder/sanity_test.go
@@ -30,7 +30,7 @@ func TestDriver(t *testing.T) {
 	fakemet := &fakemetadata{}
 
 	d.SetupControllerService(openstack.OsInstances)
-	d.SetupNodeService(fakecloudprovider, fakemnt, fakemet, map[string]string{})
+	d.SetupNodeService(fakemnt, fakemet, false, 200, map[string]string{})
 
 	// TODO: Stop call
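
Note on the resize handshake introduced by this patch: the controller now records the resize hint in the volume context at create time, so the node plugin no longer has to query Cinder — and therefore no longer needs OpenStack credentials — to decide whether a freshly restored volume needs a filesystem grow. A minimal sketch of that handshake follows; `buildVolumeContext` and `needsResize` are illustrative helpers that do not exist in the tree, they only condense what `getCreateVolumeResponse` and `NodeStageVolume` do in the hunks above.

```go
package main

import (
	"fmt"
	"strings"
)

// ResizeRequired mirrors the constant added to pkg/csi/cinder/driver.go.
const ResizeRequired = "resizeRequired"

// buildVolumeContext condenses getCreateVolumeResponse: when a volume is
// created from a snapshot, another volume, or a backup, the controller
// records the resize hint in the volume context it returns to the CO.
func buildVolumeContext(snapshotID, sourceVolID, backupID string) map[string]string {
	volCtx := map[string]string{}
	if snapshotID != "" || sourceVolID != "" || backupID != "" {
		volCtx[ResizeRequired] = "true"
	}
	return volCtx
}

// needsResize condenses the check NodeStageVolume performs on the volume
// context handed back by the container orchestrator at stage time.
func needsResize(volCtx map[string]string) bool {
	return strings.EqualFold(volCtx[ResizeRequired], "true")
}

func main() {
	volCtx := buildVolumeContext("snap-0123", "", "")
	fmt.Println(needsResize(volCtx)) // true: the filesystem is grown on stage
	fmt.Println(needsResize(nil))    // false: plain volumes skip the resize
}
```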
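The node-side attach limit follows a similar flow: the new `--node-volume-attach-limit` flag seeds the value, a lower non-zero `[BlockStorage] node-volume-attach-limit` from the cloud config wins for backward compatibility, and `SetupNodeService` clamps anything out of range back to the default maximum. The sketch below condenses that flow into a single hypothetical helper, `resolveAttachLimit`; in the patch itself the logic is split between `handle()` in main.go and `SetupNodeService` in driver.go.

```go
package main

import "fmt"

// MaxVolumesPerNode mirrors the constant added to pkg/csi/cinder/driver.go.
const MaxVolumesPerNode = 256

// resolveAttachLimit is a hypothetical helper that condenses how the flag
// value, the legacy cloud-config option, and the driver-side clamp combine.
func resolveAttachLimit(flagValue, cloudCfgValue int64) int64 {
	limit := flagValue
	// Backward compatibility: a lower, explicitly set cloud-config limit wins.
	if cloudCfgValue > 0 && cloudCfgValue < limit {
		limit = cloudCfgValue
	}
	// SetupNodeService clamps out-of-range values to the default maximum.
	if limit < 0 || limit > MaxVolumesPerNode {
		limit = MaxVolumesPerNode
	}
	return limit
}

func main() {
	fmt.Println(resolveAttachLimit(256, 0))  // 256: cloud-config option unset
	fmt.Println(resolveAttachLimit(256, 25)) // 25: lower legacy limit wins
	fmt.Println(resolveAttachLimit(500, 0))  // 256: out of range, clamped
}
```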