Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
209 changes: 209 additions & 0 deletions test/extended/storage/csi_readonly_rootfs.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,209 @@
package storage

import (
	"context"
	"fmt"
	"strings"

	g "github.com/onsi/ginkgo/v2"
	o "github.com/onsi/gomega"
	configv1 "github.com/openshift/api/config/v1"
	exutil "github.com/openshift/origin/test/extended/util"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	e2e "k8s.io/kubernetes/test/e2e/framework"
)

// csiResourceCheck identifies a single CSI workload (a controller Deployment
// or a node DaemonSet) whose pod-template containers should be validated for
// readOnlyRootFilesystem.
type csiResourceCheck struct {
	ResourceType ResourceType // ResourceTypeDeployment or ResourceTypeDaemonSet (existing type from this package)
	Namespace    string       // namespace the workload lives in
	Name         string       // workload object name
}

// Use the existing ResourceType from the package

// Registers the suite validating that every OpenShift-managed CSI driver
// container (controller and node) runs with readOnlyRootFilesystem: true.
var _ = g.Describe("[sig-storage][OCPFeature:CSIReadOnlyRootFilesystem][Jira:Storage] CSI Driver ReadOnly Root Filesystem", func() {
	defer g.GinkgoRecover()
	var (
		// Per-suite CLI client; project name prefix "csi-readonly-rootfs".
		oc = exutil.NewCLI("csi-readonly-rootfs")
	)

	g.BeforeEach(func() {
		// MicroShift clusters are out of scope for these checks.
		isMicroShift, err := exutil.IsMicroShiftCluster(oc.AdminKubeClient())
		o.Expect(err).NotTo(o.HaveOccurred())
		if isMicroShift {
			g.Skip("CSI ReadOnlyRootFilesystem tests are not supported on MicroShift")
		}

		// Check to see if we have Storage enabled; without the Storage
		// capability the CSI operators under test are not installed.
		isStorageEnabled, err := exutil.IsCapabilityEnabled(oc, configv1.ClusterVersionCapabilityStorage)
		o.Expect(err).NotTo(o.HaveOccurred(), "failed to check storage capability")
		if !isStorageEnabled {
			g.Skip("skipping, this test is only expected to work with storage enabled clusters")
		}
	})

	g.It("should verify CSI controller and driver node containers readOnlyRootFilesystem set to true", func() {
		// Get all CSI drivers present in the cluster, then validate each one.
		allCSIResources := getCSIResourcesInCluster(oc)

		runReadOnlyRootFsChecks(oc, allCSIResources)
	})
})

// getCSIResourcesInCluster dynamically discovers OpenShift CSI driver resources present in the cluster
func getCSIResourcesInCluster(oc *exutil.CLI) []csiResourceCheck {
ctx := context.TODO()
var resources []csiResourceCheck

// Check if this is a Hypershift deployment
isHypershift := isHypershiftCluster(oc)
if isHypershift {
e2e.Logf("Detected Hypershift cluster - will only check CSI node components (controllers are on management cluster)")
}

// Known OpenShift CSI driver prefixes
// https://issues.redhat.com/browse/OCPBUGS-76298 not check ebs driver
openshiftCSIDrivers := []string{
// "aws-ebs-csi-driver",
"aws-efs-csi-driver",
"azure-disk-csi-driver",
"azure-file-csi-driver",
"gcp-pd-csi-driver",
"gcp-filestore-csi-driver",
"vmware-vsphere-csi-driver",
"ibm-vpc-block-csi",
"openstack-cinder-csi-driver",
"openstack-manila-csi",
"smb-csi-driver",
}
Comment on lines +68 to +80
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Make the zero-resource case explicit.

If discovery returns no matches, the test currently falls through to the success path and validates nothing. With the hardcoded driver prefixes and the explicit EBS exclusion, that false-positive path is realistic.

Proposed fix
 	e2e.Logf("Found %d OpenShift CSI resources in cluster", len(resources))
+	if len(resources) == 0 {
+		g.Skip("no matching CSI resources discovered for readOnlyRootFilesystem validation")
+	}
 	for _, resource := range resources {
 		e2e.Logf("OpenShift CSI Resource: %s %s/%s", resource.ResourceType, resource.Namespace, resource.Name)
 	}

Also applies to: 138-143, 203-208

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@test/extended/storage/csi_readonly_rootfs.go` around lines 68 - 80, The test
currently treats an empty discovery result as success; after calling discovery
(where results are stored in the matches/driverMatches variable), add an
explicit check for the zero-resource case and fail the test with a clear message
(e.g., t.Fatalf("no CSI drivers discovered for prefixes %v",
openshiftCSIDrivers)) so an empty discovery does not produce a false positive;
apply the same explicit check and failure to the other similar places that use
the hardcoded driver lists (the blocks using openshiftCSIDrivers and the other
driver-prefix slices at the locations corresponding to lines ~138-143 and
~203-208).


isOpenShiftCSIDriver := func(name string) bool {
for _, driver := range openshiftCSIDrivers {
if strings.HasPrefix(name, driver) {
return true
}
}
return false
}

// Check CSI namespace for deployments and daemonsets
getNamespaceCheckResources := func(namespace string) {
// Get all deployments in the namespace
deployments, err := oc.AdminKubeClient().AppsV1().Deployments(namespace).List(ctx, metav1.ListOptions{})
if err != nil {
g.Fail(fmt.Sprintf("Failed to list deployments in namespace %s: %v", namespace, err))
}
Comment on lines +95 to +97
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

🧩 Analysis chain

🏁 Script executed:

# First, let's find the file and check its size
find . -name "csi_readonly_rootfs.go" -type f

Repository: openshift/origin

Length of output: 106


🏁 Script executed:

# Check the helpers.go file to understand the namespace definitions
find . -path "*/test/extended/storage/helpers.go" -type f

Repository: openshift/origin

Length of output: 94


🏁 Script executed:

# Read the target file to understand the code structure
wc -l test/extended/storage/csi_readonly_rootfs.go

Repository: openshift/origin

Length of output: 108


🏁 Script executed:

cat -n test/extended/storage/csi_readonly_rootfs.go

Repository: openshift/origin

Length of output: 8903


🏁 Script executed:

cat -n test/extended/storage/helpers.go | head -50

Repository: openshift/origin

Length of output: 2673


Don't unconditionally scan the Manila CSI namespace on non-OpenStack clusters.

The ManilaCSINamespace is documented as OpenStack-only in test/extended/storage/helpers.go:30, but getCSIResourcesInCluster calls getNamespaceCheckResources(ManilaCSINamespace) at line 136 regardless of platform. The namespace listing at lines 95–97 and 117–119 hard-fails on any error, so the test will abort on clusters where this namespace doesn't exist.

Proposed fix
 import (
 	"context"
 	"fmt"
 	"strings"
 
 	g "github.com/onsi/ginkgo/v2"
 	o "github.com/onsi/gomega"
 	configv1 "github.com/openshift/api/config/v1"
 	exutil "github.com/openshift/origin/test/extended/util"
 	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	e2e "k8s.io/kubernetes/test/e2e/framework"
 )
@@
 	// Check main CSI namespace
 	getNamespaceCheckResources(CSINamespace)
 	// Check Manila CSI namespace (OpenStack)
-	getNamespaceCheckResources(ManilaCSINamespace)
+	if _, err := oc.AdminKubeClient().CoreV1().Namespaces().Get(ctx, ManilaCSINamespace, metav1.GetOptions{}); err == nil {
+		getNamespaceCheckResources(ManilaCSINamespace)
+	} else if !apierrors.IsNotFound(err) {
+		g.Fail(fmt.Sprintf("Failed to get namespace %s: %v", ManilaCSINamespace, err))
+	}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
if err != nil {
g.Fail(fmt.Sprintf("Failed to list deployments in namespace %s: %v", namespace, err))
}
import (
"context"
"fmt"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
configv1 "github.com/openshift/api/config/v1"
exutil "github.com/openshift/origin/test/extended/util"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// ... existing code ...
// Check main CSI namespace
getNamespaceCheckResources(CSINamespace)
// Check Manila CSI namespace (OpenStack)
if _, err := oc.AdminKubeClient().CoreV1().Namespaces().Get(ctx, ManilaCSINamespace, metav1.GetOptions{}); err == nil {
getNamespaceCheckResources(ManilaCSINamespace)
} else if !apierrors.IsNotFound(err) {
g.Fail(fmt.Sprintf("Failed to get namespace %s: %v", ManilaCSINamespace, err))
}
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@test/extended/storage/csi_readonly_rootfs.go` around lines 95 - 97, The test
is unconditionally attempting to list resources in ManilaCSINamespace via
getCSIResourcesInCluster -> getNamespaceCheckResources which will hard-fail on
non-OpenStack clusters; modify getCSIResourcesInCluster (or the caller) to first
guard access to ManilaCSINamespace by checking the cluster platform (e.g., an
existing isOpenStack/platform check) or by attempting a namespace GET and
treating NotFound as a non-fatal case, and only call
getNamespaceCheckResources(ManilaCSINamespace) when the platform is OpenStack or
the namespace exists; ensure any namespace-list errors for ManilaCSINamespace
are handled as skip/continue rather than g.Fail so the test does not abort on
clusters without that namespace.


for _, deployment := range deployments.Items {
// Check if it's an OpenShift CSI controller deployment
if isOpenShiftCSIDriver(deployment.Name) && strings.Contains(deployment.Name, "controller") {
// Skip controller deployments for Hypershift as they run on management cluster
if isHypershift {
e2e.Logf("Skipping controller deployment %s in Hypershift (runs on management cluster)", deployment.Name)
continue
}
resources = append(resources, csiResourceCheck{
ResourceType: ResourceTypeDeployment,
Namespace: namespace,
Name: deployment.Name,
})
}
}

// Get all daemonsets in the namespace
daemonsets, err := oc.AdminKubeClient().AppsV1().DaemonSets(namespace).List(ctx, metav1.ListOptions{})
if err != nil {
g.Fail(fmt.Sprintf("Failed to list daemonsets in namespace %s: %v", namespace, err))
}

for _, daemonset := range daemonsets.Items {
// Check if it's an OpenShift CSI node daemonset
if isOpenShiftCSIDriver(daemonset.Name) && strings.Contains(daemonset.Name, "node") {
resources = append(resources, csiResourceCheck{
ResourceType: ResourceTypeDaemonSet,
Namespace: namespace,
Name: daemonset.Name,
})
}
}
}

// Check main CSI namespace
getNamespaceCheckResources(CSINamespace)
// Check Manila CSI namespace (OpenStack)
getNamespaceCheckResources(ManilaCSINamespace)

e2e.Logf("Found %d OpenShift CSI resources in cluster", len(resources))
for _, resource := range resources {
e2e.Logf("OpenShift CSI Resource: %s %s/%s", resource.ResourceType, resource.Namespace, resource.Name)
}

return resources
}

// isHypershiftCluster reports whether this cluster uses an external control
// plane topology, i.e. it is a Hypershift / hosted-control-plane cluster.
func isHypershiftCluster(oc *exutil.CLI) bool {
	topology, err := exutil.GetControlPlaneTopology(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	return *topology == configv1.ExternalTopologyMode
}

// runReadOnlyRootFsChecks verifies that every container (regular and init)
// in each discovered CSI resource has readOnlyRootFilesystem explicitly set
// to true. It accumulates a per-resource PASS/FAIL report and fails the test
// with the full summary if any resource has a non-compliant container.
func runReadOnlyRootFsChecks(oc *exutil.CLI, resources []csiResourceCheck) {
	var report []string
	failed := false

	for _, res := range resources {
		label := fmt.Sprintf("%s %s/%s", res.ResourceType, res.Namespace, res.Name)

		// Fetch the pod template spec for the workload kind.
		var spec *corev1.PodSpec
		switch res.ResourceType {
		case ResourceTypeDeployment:
			d, err := oc.AdminKubeClient().AppsV1().Deployments(res.Namespace).Get(context.TODO(), res.Name, metav1.GetOptions{})
			if err != nil {
				g.Fail(fmt.Sprintf("Error fetching %s: %v", label, err))
			}
			spec = &d.Spec.Template.Spec

		case ResourceTypeDaemonSet:
			ds, err := oc.AdminKubeClient().AppsV1().DaemonSets(res.Namespace).Get(context.TODO(), res.Name, metav1.GetOptions{})
			if err != nil {
				g.Fail(fmt.Sprintf("Error fetching %s: %v", label, err))
			}
			spec = &ds.Spec.Template.Spec

		default:
			g.Fail(fmt.Sprintf("Unsupported resource type: %s", res.ResourceType))
		}

		// Inspect regular containers and init containers alike.
		all := append([]corev1.Container{}, spec.Containers...)
		all = append(all, spec.InitContainers...)

		var offenders []string
		for _, c := range all {
			sc := c.SecurityContext
			if sc == nil || sc.ReadOnlyRootFilesystem == nil || !*sc.ReadOnlyRootFilesystem {
				offenders = append(offenders, c.Name)
			}
		}

		if len(offenders) > 0 {
			failed = true
			report = append(report, fmt.Sprintf("[FAIL] %s has containers without readOnlyRootFilesystem: %s", label, strings.Join(offenders, ", ")))
		} else {
			report = append(report, fmt.Sprintf("[PASS] %s (all %d containers have readOnlyRootFilesystem: true)", label, len(all)))
		}
	}

	if failed {
		g.Fail(fmt.Sprintf("Some CSI resources have containers without readOnlyRootFilesystem:\n\n%s\n", strings.Join(report, "\n")))
	} else {
		e2e.Logf("All checked CSI resources have readOnlyRootFilesystem set correctly:\n%s", strings.Join(report, "\n"))
	}
}