diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index 61aa05f3..4e5e5389 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -256,7 +256,7 @@ The Cloud Storage FUSE [stat metadata cache](https://cloud.google.com/storage/do
 - Volume attributes:
   - `metadataStatCacheCapacity`: Use the default value of `32Mi` if your workload involves up to 20,000 files. If your workload reads more than 20,000 files, increase the size by values of 10 MiB for every additional 6,000 files, an average of ~1,500 bytes per file. Alternatively, you can set the value to `"-1"` to let the stat cache use as much memory as needed.
   - `metadataTypeCacheCapacity`: Use the default value of `4Mi` if the maximum number of files within a single directory from the bucket you're mounting contains 20,000 files or less. If the maximum number of files within a single directory that you're mounting contains more than 20,000 files, increase the size by 1 MiB for every 5,000 files, an average of ~200 bytes per file. Alternatively, you can set the value to `"-1"` to let the type cache use as much memory as needed.
-  - `metadataCacheTtlSeconds`: Set the value to `"-1"` to bypass a TTL expiration and serve the file from the cache whenever it's available.
+  - `metadataCacheTTLSeconds`: Set the value to `"-1"` to bypass a TTL expiration and serve the file from the cache whenever it's available.
 
 - For example:
   - Inline ephemeral volume
@@ -273,7 +273,7 @@ The Cloud Storage FUSE [stat metadata cache](https://cloud.google.com/storage/do
         bucketName:
         metadataStatCacheCapacity: 512Mi
         metadataTypeCacheCapacity: 64Mi
-        metadataCacheTtlSeconds: "-1"
+        metadataCacheTTLSeconds: "-1"
     ```
 
   - PersistentVolume
@@ -289,7 +289,7 @@ The Cloud Storage FUSE [stat metadata cache](https://cloud.google.com/storage/do
       volumeAttributes:
         metadataStatCacheCapacity: 512Mi
         metadataTypeCacheCapacity: 64Mi
-        metadataCacheTtlSeconds: "-1"
+        metadataCacheTTLSeconds: "-1"
     ```
 
 - Mount options:
diff --git a/pkg/csi_driver/node.go b/pkg/csi_driver/node.go
index 454acf1a..91802be2 100644
--- a/pkg/csi_driver/node.go
+++ b/pkg/csi_driver/node.go
@@ -18,6 +18,7 @@ limitations under the License.
 package driver
 
 import (
+	"fmt"
 	"os"
 	"strings"
 	"time"
@@ -110,10 +111,11 @@ func (s *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublish
 		fuseMountOptions = joinMountOptions(fuseMountOptions, capMount.GetMountFlags())
 	}
 
-	fuseMountOptions, err := parseVolumeAttributes(fuseMountOptions, vc)
+	fuseMountOptions, skipBucketAccessCheck, err := parseVolumeAttributes(fuseMountOptions, vc)
 	if err != nil {
 		return nil, status.Error(codes.InvalidArgument, err.Error())
 	}
+	klog.V(6).Infof("NodePublishVolume on volume %q has skipBucketAccessCheck %t", bucketName, skipBucketAccessCheck)
 
 	if vc[VolumeContextKeyEphemeral] == TrueStr {
 		bucketName = vc[VolumeContextKeyBucketName]
@@ -137,6 +139,32 @@ func (s *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublish
 	}
 	defer s.volumeLocks.Release(targetPath)
 
+	// Check that the given Service Account has access to the GCS bucket, and that the bucket exists.
+ if bucketName != "_" && !skipBucketAccessCheck { + storageService, err := s.prepareStorageService(ctx, req.GetVolumeContext()) + if err != nil { + return nil, status.Errorf(codes.Unauthenticated, "failed to prepare storage service: %v", err) + } + defer storageService.Close() + + if exist, err := storageService.CheckBucketExists(ctx, &storage.ServiceBucket{Name: bucketName}); !exist { + code := codes.Internal + if storage.IsNotExistErr(err) { + code = codes.NotFound + } + + if storage.IsPermissionDeniedErr(err) { + code = codes.PermissionDenied + } + + if storage.IsCanceledErr(err) { + code = codes.Aborted + } + + return nil, status.Errorf(code, "failed to get GCS bucket %q: %v", bucketName, err) + } + } + // Check if the sidecar container was injected into the Pod pod, err := s.k8sClients.GetPod(ctx, vc[VolumeContextKeyPodNamespace], vc[VolumeContextKeyPodName]) if err != nil { @@ -145,7 +173,7 @@ func (s *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublish // Since the webhook mutating ordering is not definitive, // the sidecar position is not checked in the ValidatePodHasSidecarContainerInjected func. - shouldInjectedByWebhook := strings.ToLower(pod.Annotations[webhook.GcsFuseVolumeEnableAnnotation]) == "true" + shouldInjectedByWebhook := strings.ToLower(pod.Annotations[webhook.GcsFuseVolumeEnableAnnotation]) == TrueStr sidecarInjected, isInitContainer := webhook.ValidatePodHasSidecarContainerInjected(pod, false) if !sidecarInjected { if shouldInjectedByWebhook { @@ -198,6 +226,10 @@ func (s *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublish code = codes.PermissionDenied } + if strings.Contains(errMsgStr, "bucket doesn't exist") { + code = codes.NotFound + } + return nil, status.Errorf(code, "the sidecar container failed with error: %v", errMsgStr) } @@ -322,3 +354,14 @@ func (s *nodeServer) isDirMounted(targetPath string) (bool, error) { return false, nil } + +// prepareStorageService prepares the GCS Storage Service using the Kubernetes Service Account from VolumeContext. +func (s *nodeServer) prepareStorageService(ctx context.Context, vc map[string]string) (storage.Service, error) { + ts := s.driver.config.TokenManager.GetTokenSourceFromK8sServiceAccount(vc[VolumeContextKeyPodNamespace], vc[VolumeContextKeyServiceAccountName], vc[VolumeContextKeyServiceAccountToken]) + storageService, err := s.storageServiceManager.SetupService(ctx, ts) + if err != nil { + return nil, fmt.Errorf("storage service manager failed to setup service: %w", err) + } + + return storageService, nil +} diff --git a/pkg/csi_driver/utils.go b/pkg/csi_driver/utils.go index d9713ae5..97430560 100644 --- a/pkg/csi_driver/utils.go +++ b/pkg/csi_driver/utils.go @@ -47,6 +47,7 @@ const ( VolumeContextKeyMetadataTypeCacheCapacity = "metadataTypeCacheCapacity" VolumeContextKeyMetadataCacheTTLSeconds = "metadataCacheTTLSeconds" VolumeContextKeyGcsfuseLoggingSeverity = "gcsfuseLoggingSeverity" + VolumeContextKeySkipCSIBucketAccessCheck = "skipCSIBucketAccessCheck" ) func NewVolumeCapabilityAccessMode(mode csi.VolumeCapability_AccessMode_Mode) *csi.VolumeCapability_AccessMode { @@ -164,11 +165,11 @@ var volumeAttributesToMountOptionsMapping = map[string]string{ } // parseVolumeAttributes parses volume attributes and convert them to gcsfuse mount options. 
-func parseVolumeAttributes(fuseMountOptions []string, volumeContext map[string]string) ([]string, error) { +func parseVolumeAttributes(fuseMountOptions []string, volumeContext map[string]string) ([]string, bool, error) { if mountOptions, ok := volumeContext[VolumeContextKeyMountOptions]; ok { fuseMountOptions = joinMountOptions(fuseMountOptions, strings.Split(mountOptions, ",")) } - + skipCSIBucketAccessCheck := false for volumeAttribute, mountOption := range volumeAttributesToMountOptionsMapping { value, ok := volumeContext[volumeAttribute] if !ok { @@ -183,7 +184,7 @@ func parseVolumeAttributes(fuseMountOptions []string, volumeContext map[string]s case VolumeContextKeyFileCacheCapacity, VolumeContextKeyMetadataStatCacheCapacity, VolumeContextKeyMetadataTypeCacheCapacity: quantity, err := resource.ParseQuantity(value) if err != nil { - return nil, fmt.Errorf("volume attribute %v only accepts a valid Quantity value, got %q, error: %w", volumeAttribute, value, err) + return nil, skipCSIBucketAccessCheck, fmt.Errorf("volume attribute %v only accepts a valid Quantity value, got %q, error: %w", volumeAttribute, value, err) } megabytes := quantity.Value() @@ -203,7 +204,7 @@ func parseVolumeAttributes(fuseMountOptions []string, volumeContext map[string]s if boolVal, err := strconv.ParseBool(value); err == nil { mountOptionWithValue = mountOption + strconv.FormatBool(boolVal) } else { - return nil, fmt.Errorf("volume attribute %v only accepts a valid bool value, got %q", volumeAttribute, value) + return nil, skipCSIBucketAccessCheck, fmt.Errorf("volume attribute %v only accepts a valid bool value, got %q", volumeAttribute, value) } // parse int volume attributes @@ -215,7 +216,7 @@ func parseVolumeAttributes(fuseMountOptions []string, volumeContext map[string]s mountOptionWithValue = mountOption + strconv.Itoa(intVal) } else { - return nil, fmt.Errorf("volume attribute %v only accepts a valid int value, got %q", volumeAttribute, value) + return nil, skipCSIBucketAccessCheck, fmt.Errorf("volume attribute %v only accepts a valid int value, got %q", volumeAttribute, value) } default: @@ -225,7 +226,18 @@ func parseVolumeAttributes(fuseMountOptions []string, volumeContext map[string]s fuseMountOptions = joinMountOptions(fuseMountOptions, []string{mountOptionWithValue}) } - return fuseMountOptions, nil + value, ok := volumeContext[VolumeContextKeySkipCSIBucketAccessCheck] + if !ok { + return fuseMountOptions, skipCSIBucketAccessCheck, nil + } + + if boolVal, err := strconv.ParseBool(value); err == nil { + skipCSIBucketAccessCheck = boolVal + } else { + return nil, skipCSIBucketAccessCheck, fmt.Errorf("volume attribute %v only accepts a valid bool value, got %q", VolumeContextKeySkipCSIBucketAccessCheck, value) + } + + return fuseMountOptions, skipCSIBucketAccessCheck, nil } func putExitFile(pod *corev1.Pod, emptyDirBasePath string) error { diff --git a/pkg/csi_driver/utils_test.go b/pkg/csi_driver/utils_test.go index a3b9527b..03d370d2 100644 --- a/pkg/csi_driver/utils_test.go +++ b/pkg/csi_driver/utils_test.go @@ -24,6 +24,10 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" ) +const ( + TraceStr = "trace" +) + func TestJoinMountOptions(t *testing.T) { t.Parallel() t.Run("joining mount options into one", func(t *testing.T) { @@ -65,10 +69,11 @@ func TestParseVolumeAttributes(t *testing.T) { t.Run("parsing volume attributes into mount options", func(t *testing.T) { t.Parallel() testCases := []struct { - name string - volumeContext map[string]string - expectedMountOptions []string - expectedErr bool 
+ name string + volumeContext map[string]string + expectedMountOptions []string + expectedSkipBucketAccessCheck bool + expectedErr bool }{ { name: "should return correct fileCacheCapacity 1", @@ -278,7 +283,7 @@ func TestParseVolumeAttributes(t *testing.T) { { name: "should return correct gcsfuseLoggingSeverity", volumeContext: map[string]string{VolumeContextKeyGcsfuseLoggingSeverity: "trace"}, - expectedMountOptions: []string{volumeAttributesToMountOptionsMapping[VolumeContextKeyGcsfuseLoggingSeverity] + "trace"}, + expectedMountOptions: []string{volumeAttributesToMountOptionsMapping[VolumeContextKeyGcsfuseLoggingSeverity] + TraceStr}, }, { name: "should return correct mount options", @@ -302,12 +307,45 @@ func TestParseVolumeAttributes(t *testing.T) { volumeAttributesToMountOptionsMapping[VolumeContextKeyMetadataCacheTTLSeconds] + "3600", }, }, + { + name: "should return correct mount options, and skip bucket access check flag", + expectedSkipBucketAccessCheck: true, + volumeContext: map[string]string{ + VolumeContextKeyMountOptions: "implicit-dirs,uid=1001", + VolumeContextKeyGcsfuseLoggingSeverity: "trace", + VolumeContextKeyFileCacheCapacity: "500Gi", + VolumeContextKeyFileCacheForRangeRead: "true", + VolumeContextKeyMetadataStatCacheCapacity: "-100", + VolumeContextKeyMetadataTypeCacheCapacity: "0", + VolumeContextKeyMetadataCacheTTLSeconds: "3600", + VolumeContextKeySkipCSIBucketAccessCheck: "true", + }, + expectedMountOptions: []string{ + "implicit-dirs", + "uid=1001", + volumeAttributesToMountOptionsMapping[VolumeContextKeyGcsfuseLoggingSeverity] + "trace", + volumeAttributesToMountOptionsMapping[VolumeContextKeyFileCacheCapacity] + "512000", + volumeAttributesToMountOptionsMapping[VolumeContextKeyFileCacheForRangeRead] + "true", + volumeAttributesToMountOptionsMapping[VolumeContextKeyMetadataStatCacheCapacity] + "-1", + volumeAttributesToMountOptionsMapping[VolumeContextKeyMetadataTypeCacheCapacity] + "0", + volumeAttributesToMountOptionsMapping[VolumeContextKeyMetadataCacheTTLSeconds] + "3600", + }, + }, + { + name: "unexpected value for VolumeContextKeySkipCSIBucketAccessCheck", + volumeContext: map[string]string{VolumeContextKeySkipCSIBucketAccessCheck: "blah"}, + expectedErr: true, + }, + { + name: "value set to false for VolumeContextKeySkipCSIBucketAccessCheck", + volumeContext: map[string]string{VolumeContextKeySkipCSIBucketAccessCheck: "false"}, + expectedMountOptions: []string{}, + }, } for _, tc := range testCases { t.Logf("test case: %s", tc.name) - output, err := parseVolumeAttributes([]string{}, tc.volumeContext) - + output, skipCSIBucketAccessCheck, err := parseVolumeAttributes([]string{}, tc.volumeContext) if (err != nil) != tc.expectedErr { t.Errorf("Got error %v, but expected error %v", err, tc.expectedErr) } @@ -315,6 +353,9 @@ func TestParseVolumeAttributes(t *testing.T) { if tc.expectedErr { continue } + if tc.expectedSkipBucketAccessCheck != skipCSIBucketAccessCheck { + t.Errorf("Got skipBucketAccessCheck %v, but expected %v", skipCSIBucketAccessCheck, tc.expectedSkipBucketAccessCheck) + } less := func(a, b string) bool { return a > b } if diff := cmp.Diff(output, tc.expectedMountOptions, cmpopts.SortSlices(less)); diff != "" { diff --git a/test/e2e/specs/specs.go b/test/e2e/specs/specs.go index 6c521718..c17e6f99 100644 --- a/test/e2e/specs/specs.go +++ b/test/e2e/specs/specs.go @@ -52,19 +52,25 @@ const ( TesterContainerName = "volume-tester" K8sServiceAccountName = "gcsfuse-csi-sa" //nolint:gosec - K8sSecretName = "gcsfuse-csi-test-secret" - 
FakeVolumePrefix = "gcsfuse-csi-fake-volume" - InvalidVolumePrefix = "gcsfuse-csi-invalid-volume" - NonRootVolumePrefix = "gcsfuse-csi-non-root-volume" - InvalidMountOptionsVolumePrefix = "gcsfuse-csi-invalid-mount-options-volume" - ImplicitDirsVolumePrefix = "gcsfuse-csi-implicit-dirs-volume" - ForceNewBucketPrefix = "gcsfuse-csi-force-new-bucket" - SubfolderInBucketPrefix = "gcsfuse-csi-subfolder-in-bucket" - MultipleBucketsPrefix = "gcsfuse-csi-multiple-buckets" - EnableFileCachePrefix = "gcsfuse-csi-enable-file-cache" - EnableFileCacheWithLargeCapacityPrefix = "gcsfuse-csi-enable-file-cache-large-capacity" - ImplicitDirsPath = "implicit-dir" - InvalidVolume = "" + K8sSecretName = "gcsfuse-csi-test-secret" + FakeVolumePrefix = "gcsfuse-csi-fake-volume" + InvalidVolumePrefix = "gcsfuse-csi-invalid-volume" + NonRootVolumePrefix = "gcsfuse-csi-non-root-volume" + InvalidMountOptionsVolumePrefix = "gcsfuse-csi-invalid-mount-options-volume" + ImplicitDirsVolumePrefix = "gcsfuse-csi-implicit-dirs-volume" + ForceNewBucketPrefix = "gcsfuse-csi-force-new-bucket" + SubfolderInBucketPrefix = "gcsfuse-csi-subfolder-in-bucket" + MultipleBucketsPrefix = "gcsfuse-csi-multiple-buckets" + EnableFileCachePrefix = "gcsfuse-csi-enable-file-cache" + EnableFileCacheWithLargeCapacityPrefix = "gcsfuse-csi-enable-file-cache-large-capacity" + ImplicitDirsPath = "implicit-dir" + InvalidVolume = "" + SkipCSIBucketAccessCheckPrefix = "gcsfuse-csi-skip-bucket-access-check" + SkipCSIBucketAccessCheckAndFakeVolumePrefix = "gcsfuse-csi-skip-bucket-access-check-fake-volume" + SkipCSIBucketAccessCheckAndInvalidVolumePrefix = "gcsfuse-csi-skip-bucket-access-check-invalid-volume" + SkipCSIBucketAccessCheckAndInvalidMountOptionsVolumePrefix = "gcsfuse-csi-skip-bucket-access-check-invalid-mount-options-volume" + SkipCSIBucketAccessCheckAndNonRootVolumePrefix = "gcsfuse-csi-skip-bucket-access-check-non-root-volume" + SkipCSIBucketAccessCheckAndImplicitDirsVolumePrefix = "gcsfuse-csi-skip-bucket-access-check-implicit-dirs-volume" GoogleCloudCliImage = "gcr.io/google.com/cloudsdktool/google-cloud-cli:slim" GolangImage = "golang:1.22.1" diff --git a/test/e2e/testdriver.go b/test/e2e/testdriver.go index af9ae3b6..adcedae6 100644 --- a/test/e2e/testdriver.go +++ b/test/e2e/testdriver.go @@ -55,6 +55,7 @@ type gcsVolume struct { fileCacheCapacity string shared bool readOnly bool + skipBucketAccessCheck bool } // InitGCSFuseCSITestDriver returns GCSFuseCSITestDriver that implements TestDriver interface. 
@@ -144,9 +145,9 @@ func (n *GCSFuseCSITestDriver) CreateVolume(ctx context.Context, config *storage isMultipleBucketsPrefix := false switch config.Prefix { - case specs.FakeVolumePrefix: + case specs.FakeVolumePrefix, specs.SkipCSIBucketAccessCheckAndFakeVolumePrefix: bucketName = uuid.NewString() - case specs.InvalidVolumePrefix: + case specs.InvalidVolumePrefix, specs.SkipCSIBucketAccessCheckAndInvalidVolumePrefix: bucketName = specs.InvalidVolume case specs.ForceNewBucketPrefix: bucketName = n.createBucket(ctx, config.Framework.Namespace.Name) @@ -205,6 +206,18 @@ func (n *GCSFuseCSITestDriver) CreateVolume(ctx context.Context, config *storage v.fileCacheCapacity = "100Mi" case specs.EnableFileCacheWithLargeCapacityPrefix: v.fileCacheCapacity = "2Gi" + case specs.SkipCSIBucketAccessCheckPrefix, specs.SkipCSIBucketAccessCheckAndFakeVolumePrefix, specs.SkipCSIBucketAccessCheckAndInvalidVolumePrefix: + v.skipBucketAccessCheck = true + case specs.SkipCSIBucketAccessCheckAndInvalidMountOptionsVolumePrefix: + mountOptions += ",invalid-option" + v.skipBucketAccessCheck = true + case specs.SkipCSIBucketAccessCheckAndNonRootVolumePrefix: + mountOptions += ",uid=1001" + v.skipBucketAccessCheck = true + case specs.SkipCSIBucketAccessCheckAndImplicitDirsVolumePrefix: + specs.CreateImplicitDirInBucket(specs.ImplicitDirsPath, bucketName) + mountOptions += ",implicit-dirs" + v.skipBucketAccessCheck = true } v.mountOptions = mountOptions @@ -243,6 +256,10 @@ func (n *GCSFuseCSITestDriver) GetPersistentVolumeSource(readOnly bool, _ string va[driver.VolumeContextKeyFileCacheCapacity] = gv.fileCacheCapacity } + if gv.skipBucketAccessCheck { + va[driver.VolumeContextKeySkipCSIBucketAccessCheck] = "true" + } + return &corev1.PersistentVolumeSource{ CSI: &corev1.CSIPersistentVolumeSource{ Driver: n.driverInfo.Name, @@ -266,6 +283,10 @@ func (n *GCSFuseCSITestDriver) GetVolume(config *storageframework.PerTestConfig, va[driver.VolumeContextKeyFileCacheCapacity] = gv.fileCacheCapacity } + if gv.skipBucketAccessCheck { + va[driver.VolumeContextKeySkipCSIBucketAccessCheck] = "true" + } + return va, gv.shared, gv.readOnly } diff --git a/test/e2e/testsuites/failed_mount.go b/test/e2e/testsuites/failed_mount.go index 5f2af276..40456b54 100644 --- a/test/e2e/testsuites/failed_mount.go +++ b/test/e2e/testsuites/failed_mount.go @@ -85,12 +85,8 @@ func (t *gcsFuseCSIFailedMountTestSuite) DefineTests(driver storageframework.Tes framework.ExpectNoError(err, "while cleaning up") } - ginkgo.It("should fail when the specified GCS bucket does not exist", func() { - if pattern.VolType == storageframework.DynamicPV { - e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV) - } - - init(specs.FakeVolumePrefix) + testCaseNonExistentBucket := func(configPrefix string) { + init(configPrefix) defer cleanup() ginkgo.By("Configuring the pod") @@ -102,16 +98,26 @@ func (t *gcsFuseCSIFailedMountTestSuite) DefineTests(driver storageframework.Tes defer tPod.Cleanup(ctx) ginkgo.By("Checking that the pod has failed mount error") - tPod.WaitForFailedMountError(ctx, codes.Internal.String()) + tPod.WaitForFailedMountError(ctx, codes.NotFound.String()) tPod.WaitForFailedMountError(ctx, "storage: bucket doesn't exist") + } + + ginkgo.It("should fail when the specified GCS bucket does not exist", func() { + if pattern.VolType == storageframework.DynamicPV { + e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV) + } + testCaseNonExistentBucket(specs.FakeVolumePrefix) }) - ginkgo.It("should fail when the 
+	ginkgo.It("[csi-skip-bucket-access-check] should fail when the specified GCS bucket does not exist", func() {
 		if pattern.VolType == storageframework.DynamicPV {
 			e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV)
 		}
+		testCaseNonExistentBucket(specs.SkipCSIBucketAccessCheckAndFakeVolumePrefix)
+	})
 
-		init(specs.InvalidVolumePrefix)
+	testCaseInvalidBucketName := func(configPrefix string) {
+		init(configPrefix)
 		defer cleanup()
 
 		ginkgo.By("Configuring the pod")
@@ -123,12 +129,28 @@ func (t *gcsFuseCSIFailedMountTestSuite) DefineTests(driver storageframework.Tes
 		defer tPod.Cleanup(ctx)
 
 		ginkgo.By("Checking that the pod has failed mount error")
-		tPod.WaitForFailedMountError(ctx, codes.Internal.String())
+		tPod.WaitForFailedMountError(ctx, codes.NotFound.String())
 		tPod.WaitForFailedMountError(ctx, "storage: bucket doesn't exist")
+	}
+
+	ginkgo.It("should fail when the specified GCS bucket name is invalid", func() {
+		if pattern.VolType == storageframework.DynamicPV {
+			e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV)
+		}
+
+		testCaseInvalidBucketName(specs.InvalidVolumePrefix)
 	})
 
-	ginkgo.It("should fail when the specified service account does not have access to the GCS bucket", func() {
-		init()
+	ginkgo.It("[csi-skip-bucket-access-check] should fail when the specified GCS bucket name is invalid", func() {
+		if pattern.VolType == storageframework.DynamicPV {
+			e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV)
+		}
+
+		testCaseInvalidBucketName(specs.SkipCSIBucketAccessCheckAndInvalidVolumePrefix)
+	})
+
+	testCaseSAInsufficientAccess := func(configPrefix string) {
+		init(configPrefix)
 		defer cleanup()
 
 		ginkgo.By("Configuring the pod")
@@ -151,10 +173,27 @@ func (t *gcsFuseCSIFailedMountTestSuite) DefineTests(driver storageframework.Tes
 
 		ginkgo.By("Deleting the Kubernetes service account")
 		testK8sSA.Cleanup(ctx)
+
+		// For the invalid-SA test case, the Unauthenticated error comes from prepareStorageService() in the CSI NodePublishVolume call. When the CSI driver skips the bucket access check, this step is skipped.
+		if configPrefix == specs.SkipCSIBucketAccessCheckPrefix {
+			return
+		}
+
+		ginkgo.By("Checking that the pod has failed mount error Unauthenticated")
+		tPod.WaitForFailedMountError(ctx, codes.Unauthenticated.String())
+		tPod.WaitForFailedMountError(ctx, "storage service manager failed to setup service: context deadline exceeded")
+	}
+
+	ginkgo.It("should fail when the specified service account does not have access to the GCS bucket", func() {
+		testCaseSAInsufficientAccess("")
 	})
 
-	ginkgo.It("should fail when the sidecar container is not injected", func() {
-		init()
+	ginkgo.It("[csi-skip-bucket-access-check] should fail when the specified service account does not have access to the GCS bucket", func() {
+		testCaseSAInsufficientAccess(specs.SkipCSIBucketAccessCheckPrefix)
+	})
+
+	testCaseSidecarNotInjected := func(configPrefix string) {
+		init(configPrefix)
 		defer cleanup()
 
 		ginkgo.By("Configuring the pod")
@@ -172,10 +211,18 @@ func (t *gcsFuseCSIFailedMountTestSuite) DefineTests(driver storageframework.Tes
 		ginkgo.By("Checking that the pod has failed mount error")
 		tPod.WaitForFailedMountError(ctx, codes.FailedPrecondition.String())
 		tPod.WaitForFailedMountError(ctx, "failed to find the sidecar container in Pod spec")
+	}
+
+	ginkgo.It("should fail when the sidecar container is not injected", func() {
+		testCaseSidecarNotInjected("")
 	})
 
-	ginkgo.It("should fail when the gcsfuse processes got killed due to OOM", func() {
-		init()
+	ginkgo.It("[csi-skip-bucket-access-check] should fail when the sidecar container is not injected", func() {
+		testCaseSidecarNotInjected(specs.SkipCSIBucketAccessCheckPrefix)
+	})
+
+	testCaseGCSFuseOOM := func(configPrefix string) {
+		init(configPrefix)
 		defer cleanup()
 
 		ginkgo.By("Configuring the pod")
@@ -194,10 +241,18 @@ func (t *gcsFuseCSIFailedMountTestSuite) DefineTests(driver storageframework.Tes
 
 		ginkgo.By("Checking that the pod has failed mount error")
 		tPod.WaitForFailedMountError(ctx, codes.ResourceExhausted.String())
+	}
+
+	ginkgo.It("should fail when the gcsfuse processes got killed due to OOM", func() {
+		testCaseGCSFuseOOM("")
 	})
 
-	ginkgo.It("should fail when invalid mount options are passed", func() {
-		init(specs.InvalidMountOptionsVolumePrefix)
+	ginkgo.It("[csi-skip-bucket-access-check] should fail when the gcsfuse processes got killed due to OOM", func() {
+		testCaseGCSFuseOOM(specs.SkipCSIBucketAccessCheckPrefix)
+	})
+
+	testcaseInvalidMountOptions := func(configPrefix string) {
+		init(configPrefix)
 		defer cleanup()
 
 		ginkgo.By("Configuring the pod")
@@ -211,6 +266,14 @@ func (t *gcsFuseCSIFailedMountTestSuite) DefineTests(driver storageframework.Tes
 		ginkgo.By("Checking that the pod has failed mount error")
 		tPod.WaitForFailedMountError(ctx, codes.InvalidArgument.String())
 		tPod.WaitForFailedMountError(ctx, "Incorrect Usage. flag provided but not defined: -invalid-option")
+	}
+
+	ginkgo.It("should fail when invalid mount options are passed", func() {
+		testcaseInvalidMountOptions(specs.InvalidMountOptionsVolumePrefix)
+	})
+
+	ginkgo.It("[csi-skip-bucket-access-check] should fail when invalid mount options are passed", func() {
+		testcaseInvalidMountOptions(specs.SkipCSIBucketAccessCheckAndInvalidMountOptionsVolumePrefix)
 	})
 
 	ginkgo.It("should fail when the sidecar container is specified with high resource usage", func() {
diff --git a/test/e2e/testsuites/volumes.go b/test/e2e/testsuites/volumes.go
index bcc5633e..5da19311 100644
--- a/test/e2e/testsuites/volumes.go
+++ b/test/e2e/testsuites/volumes.go
@@ -102,8 +102,8 @@ func (t *gcsFuseCSIVolumesTestSuite) DefineTests(driver storageframework.TestDri
 		framework.ExpectNoError(err, "while cleaning up")
 	}
 
-	ginkgo.It("should store data and retain the data", func() {
-		init()
+	testCaseStoreAndRetainData := func(configPrefix string) {
+		init(configPrefix)
 		defer cleanup()
 
 		ginkgo.By("Configuring the first pod")
@@ -137,10 +137,18 @@ func (t *gcsFuseCSIVolumesTestSuite) DefineTests(driver storageframework.TestDri
 		ginkgo.By("Checking that the second pod command exits with no error")
 		tPod2.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("mount | grep %v | grep rw,", mountPath))
 		tPod2.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("grep 'hello world' %v/data", mountPath))
+	}
+
+	ginkgo.It("should store data and retain the data", func() {
+		testCaseStoreAndRetainData("")
 	})
 
-	ginkgo.It("[read-only] should fail when write", func() {
-		init()
+	ginkgo.It("[csi-skip-bucket-access-check] should store data and retain the data", func() {
+		testCaseStoreAndRetainData(specs.SkipCSIBucketAccessCheckPrefix)
+	})
+
+	testCaseReadOnlyFailedWrite := func(configPrefix string) {
+		init(configPrefix)
 		defer cleanup()
 
 		ginkgo.By("Configuring the writer pod")
@@ -182,15 +190,22 @@ func (t *gcsFuseCSIVolumesTestSuite) DefineTests(driver storageframework.TestDri
 
 		ginkgo.By("Expecting error when write to read-only volumes")
 		tPod.VerifyExecInPodFail(f, specs.TesterContainerName, fmt.Sprintf("echo 'hello world' > %v/data", mountPath), 1)
+	}
+
+	ginkgo.It("[read-only] should fail when write", func() {
+		testCaseReadOnlyFailedWrite("")
+	})
+
+	ginkgo.It("[read-only][csi-skip-bucket-access-check] should fail when write", func() {
+		testCaseReadOnlyFailedWrite(specs.SkipCSIBucketAccessCheckPrefix)
 	})
 
-	ginkgo.It("[non-root] should store data and retain the data", func() {
-		init(specs.NonRootVolumePrefix)
+	testCaseStoreRetainData := func(configPrefix string, uid, gid, fsgroup int) {
+		init(configPrefix)
 		defer cleanup()
 
 		ginkgo.By("Configuring the first pod")
 		tPod1 := specs.NewTestPod(f.ClientSet, f.Namespace)
-		tPod1.SetNonRootSecurityContext(1001, 2002, 0)
+		tPod1.SetNonRootSecurityContext(uid, gid, fsgroup)
 		tPod1.SetupVolume(l.volumeResource, volumeName, mountPath, false)
 
 		ginkgo.By("Deploying the first pod")
@@ -220,52 +235,25 @@ func (t *gcsFuseCSIVolumesTestSuite) DefineTests(driver storageframework.TestDri
 		ginkgo.By("Checking that the second pod command exits with no error")
 		tPod2.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("mount | grep %v | grep rw,", mountPath))
 		tPod2.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("grep 'hello world' %v/data", mountPath))
+	}
+
+	ginkgo.It("[non-root] should store data and retain the data", func() {
+		testCaseStoreRetainData(specs.NonRootVolumePrefix, 1001, 2002, 0)
+	})
+
+	ginkgo.It("[non-root][csi-skip-bucket-access-check] should store data and retain the data", func() {
+		testCaseStoreRetainData(specs.SkipCSIBucketAccessCheckAndNonRootVolumePrefix, 1001, 2002, 0)
 	})
 
 	ginkgo.It("[fsgroup delegation] should store data and retain the data", func() {
-		init()
-		defer cleanup()
-
-		ginkgo.By("Configuring the first pod")
-		tPod1 := specs.NewTestPod(f.ClientSet, f.Namespace)
-		tPod1.SetNonRootSecurityContext(1001, 2002, 3003)
-		tPod1.SetupVolume(l.volumeResource, volumeName, mountPath, false)
-
-		ginkgo.By("Deploying the first pod")
-		tPod1.Create(ctx)
-
-		ginkgo.By("Checking that the first pod is running")
-		tPod1.WaitForRunning(ctx)
-
-		ginkgo.By("Checking that the first pod command exits with no error")
-		tPod1.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("mount | grep %v | grep rw,", mountPath))
-		tPod1.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("echo 'hello world' > %v/data && grep 'hello world' %v/data", mountPath, mountPath))
-
-		ginkgo.By("Deleting the first pod")
-		tPod1.Cleanup(ctx)
-
-		ginkgo.By("Configuring the second pod")
-		tPod2 := specs.NewTestPod(f.ClientSet, f.Namespace)
-		tPod2.SetupVolume(l.volumeResource, volumeName, mountPath, false)
-
-		ginkgo.By("Deploying the second pod")
-		tPod2.Create(ctx)
-		defer tPod2.Cleanup(ctx)
-
-		ginkgo.By("Checking that the second pod is running")
-		tPod2.WaitForRunning(ctx)
-
-		ginkgo.By("Checking that the second pod command exits with no error")
-		tPod2.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("mount | grep %v | grep rw,", mountPath))
-		tPod2.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("grep 'hello world' %v/data", mountPath))
+		testCaseStoreRetainData("", 1001, 2002, 3003)
 	})
 
-	ginkgo.It("should store data in implicit directory", func() {
-		if pattern.VolType == storageframework.DynamicPV {
-			e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV)
-		}
+	ginkgo.It("[fsgroup delegation][csi-skip-bucket-access-check] should store data and retain the data", func() {
+		testCaseStoreRetainData(specs.SkipCSIBucketAccessCheckPrefix, 1001, 2002, 3003)
+	})
 
-		init(specs.ImplicitDirsVolumePrefix)
+	testCaseImplicitDir := func(configPrefix string) {
+		init(configPrefix)
 		defer cleanup()
 
 		ginkgo.By("Configuring the pod")
@@ -282,10 +270,24 @@ func (t *gcsFuseCSIVolumesTestSuite) DefineTests(driver storageframework.TestDri
 		ginkgo.By("Checking that the pod command exits with no error")
 		tPod.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("mount | grep %v | grep rw,", mountPath))
 		tPod.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("echo 'hello world' > %v/%v/data && grep 'hello world' %v/%v/data", mountPath, specs.ImplicitDirsPath, mountPath, specs.ImplicitDirsPath))
+	}
+	ginkgo.It("should store data in implicit directory", func() {
+		if pattern.VolType == storageframework.DynamicPV {
+			e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV)
+		}
+
+		testCaseImplicitDir(specs.ImplicitDirsVolumePrefix)
 	})
 
+	ginkgo.It("[csi-skip-bucket-access-check] should store data in implicit directory", func() {
+		if pattern.VolType == storageframework.DynamicPV {
+			e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV)
+		}
 
-	ginkgo.It("should store data using custom sidecar container image", func() {
-		init()
+		testCaseImplicitDir(specs.SkipCSIBucketAccessCheckAndImplicitDirsVolumePrefix)
+	})
+
+	testCaseStoreDataCustomContainerImage := func(configPrefix string) {
+		init(configPrefix)
 		defer cleanup()
 
 		ginkgo.By("Configuring the pod")
@@ -306,10 +308,17 @@ func (t *gcsFuseCSIVolumesTestSuite) DefineTests(driver storageframework.TestDri
 		ginkgo.By("Checking that the pod command exits with no error")
 		tPod.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("mount | grep %v | grep rw,", mountPath))
 		tPod.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("echo 'hello world' > %v/data && grep 'hello world' %v/data", mountPath, mountPath))
+	}
+
+	ginkgo.It("should store data using custom sidecar container image", func() {
+		testCaseStoreDataCustomContainerImage("")
+	})
+	ginkgo.It("[csi-skip-bucket-access-check] should store data using custom sidecar container image", func() {
+		testCaseStoreDataCustomContainerImage(specs.SkipCSIBucketAccessCheckPrefix)
 	})
 
-	ginkgo.It("should store data using custom buffer volume", func() {
-		init()
+	testCaseCustomBufferVol := func(configPrefix string) {
+		init(configPrefix)
 		defer cleanup()
 
 		ginkgo.By("Configuring the pod")
@@ -333,10 +342,17 @@ func (t *gcsFuseCSIVolumesTestSuite) DefineTests(driver storageframework.TestDri
 		ginkgo.By("Checking that the pod command exits with no error")
 		tPod.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("mount | grep %v | grep rw,", mountPath))
 		tPod.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("echo 'hello world' > %v/data && grep 'hello world' %v/data", mountPath, mountPath))
+	}
+
+	ginkgo.It("should store data using custom buffer volume", func() {
+		testCaseCustomBufferVol("")
+	})
+	ginkgo.It("[csi-skip-bucket-access-check] should store data using custom buffer volume", func() {
+		testCaseCustomBufferVol(specs.SkipCSIBucketAccessCheckPrefix)
 	})
 
-	ginkgo.It("should store data and retain the data in init container", func() {
-		init()
+	testCaseStoreDataInitContainer := func(configPrefix string) {
+		init(configPrefix)
 		defer cleanup()
 
 		ginkgo.By("Configuring the pod")
@@ -356,5 +372,12 @@ func (t *gcsFuseCSIVolumesTestSuite) DefineTests(driver storageframework.TestDri
 		tPod.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("mount | grep %v | grep rw,", mountPath))
 		tPod.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("grep 'hello world from the init container' %v/data1", mountPath))
 		tPod.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("echo 'hello world from the regular container' > %v/data2 && grep 'hello world from the regular container' %v/data2", mountPath, mountPath))
+	}
+
+	ginkgo.It("should store data and retain the data in init container", func() {
+		testCaseStoreDataInitContainer("")
+	})
+	ginkgo.It("[csi-skip-bucket-access-check] should store data and retain the data in init container", func() {
+		testCaseStoreDataInitContainer(specs.SkipCSIBucketAccessCheckPrefix)
 	})
 }
diff --git a/test/e2e/utils/handler.go b/test/e2e/utils/handler.go
index 9664a3bb..be3f38d0 100644
--- a/test/e2e/utils/handler.go
+++ b/test/e2e/utils/handler.go
@@ -221,8 +221,9 @@ func generateTestSkip(testParams *TestParameters) string {
 	}
 
 	// TODO(songjiaxun) remove this logic after the next CSI driver release.
+	// TODO(saikatroyc) remove the csi-skip-bucket-access-check skip once the managed driver is released with skip-bucket-access-check support.
 	if testParams.UseGKEManagedDriver {
-		skipTests = append(skipTests, "Pod.RestartPolicy.is.OnFailure$", "Job.with.RestartPolicy.OnFailure.eventually.succeed", "fast.termination", "fileCache", "gcsfuseIntegrationFileCache", "init.container", "istio")
+		skipTests = append(skipTests, "Pod.RestartPolicy.is.OnFailure$", "Job.with.RestartPolicy.OnFailure.eventually.succeed", "fast.termination", "fileCache", "gcsfuseIntegrationFileCache", "init.container", "istio", "csi-skip-bucket-access-check")
 		if strings.HasPrefix(testParams.GkeClusterVersion, "1.29") && testParams.SupportsNativeSidecar {
 			skipTests = append(skipTests, "autoTermination", "custom.sidecar.container")
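
The contract this PR adds to `parseVolumeAttributes` is small enough to sketch in isolation. Below is a minimal, self-contained rendering of how the new `skipCSIBucketAccessCheck` attribute is interpreted — absent means false, anything else must satisfy `strconv.ParseBool`. The standalone function and `main` harness here are illustrative only, not the driver's actual code:

```go
package main

import (
	"fmt"
	"strconv"
)

// skipBucketAccessCheck mirrors the attribute handling added in
// pkg/csi_driver/utils.go: the attribute is optional and defaults to false,
// and a non-boolean value is rejected with an error.
func skipBucketAccessCheck(volumeContext map[string]string) (bool, error) {
	value, ok := volumeContext["skipCSIBucketAccessCheck"]
	if !ok {
		// Attribute not set: keep the default behavior and run the bucket access check.
		return false, nil
	}
	boolVal, err := strconv.ParseBool(value)
	if err != nil {
		return false, fmt.Errorf("volume attribute skipCSIBucketAccessCheck only accepts a valid bool value, got %q", value)
	}
	return boolVal, nil
}

func main() {
	for _, vc := range []map[string]string{
		{},                                     // default: check runs
		{"skipCSIBucketAccessCheck": "true"},   // opt out of the check
		{"skipCSIBucketAccessCheck": "blah"},   // rejected, matches the "blah" test case above
	} {
		skip, err := skipBucketAccessCheck(vc)
		fmt.Println(skip, err)
	}
}
```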
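The `NodePublishVolume` hunk maps bucket-lookup failures onto gRPC status codes, starting from `Internal` and letting later matches win. A hedged sketch of that precedence, with local error values standing in for the driver's `storage.IsNotExistErr` / `IsPermissionDeniedErr` / `IsCanceledErr` helpers (the stand-ins are assumptions for illustration; only the code values come from the diff):

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
)

// Stand-ins for the storage package's error predicates used in node.go.
var (
	errBucketNotExist   = errors.New("storage: bucket doesn't exist")
	errPermissionDenied = errors.New("permission denied")
	errCanceled         = errors.New("context canceled")
)

// bucketCheckCode reproduces the mapping applied after CheckBucketExists:
// Internal by default, overridden by NotFound, PermissionDenied, or Aborted.
func bucketCheckCode(err error) codes.Code {
	code := codes.Internal
	if errors.Is(err, errBucketNotExist) {
		code = codes.NotFound
	}
	if errors.Is(err, errPermissionDenied) {
		code = codes.PermissionDenied
	}
	if errors.Is(err, errCanceled) {
		code = codes.Aborted
	}
	return code
}

func main() {
	fmt.Println(bucketCheckCode(errBucketNotExist)) // NotFound, as the updated e2e tests expect
	fmt.Println(bucketCheckCode(errCanceled))       // Aborted
}
```

This is why the failed-mount tests in this PR now wait for `codes.NotFound` instead of `codes.Internal` when the bucket is fake or the bucket name is invalid.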
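Finally, the handler.go hunk relies on every new test name carrying the `[csi-skip-bucket-access-check]` tag so a single `skipTests` entry can filter them on the GKE managed driver. Assuming the entries in `generateTestSkip` are OR-ed into one skip regexp matched against full ginkgo test names (that joining step is not shown in this diff), the filtering behaves roughly like:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// A few of the entries appended in generateTestSkip, including the new tag.
	skipTests := []string{"fileCache", "istio", "csi-skip-bucket-access-check"}

	// Assumption: entries are combined into a single alternation regexp.
	skip := regexp.MustCompile(strings.Join(skipTests, "|"))

	name := "[csi-skip-bucket-access-check] should store data and retain the data"
	fmt.Println(skip.MatchString(name)) // true: skipped until the managed driver supports the attribute
}
```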