From 3c4424c4c68f4f36b5037acee0ca3beb19f75452 Mon Sep 17 00:00:00 2001
From: Bikash Shaw
Date: Fri, 20 Mar 2026 14:46:05 +0530
Subject: [PATCH 1/2] Fix: PinnedImages test should respect node taints

The PinnedImages conformance test was selecting worker nodes by label
only, ignoring node taints. This caused failures in environments with
dedicated/tainted nodes (e.g., OPCT test infrastructure, edge zones).

Problem:
- The test created a custom MachineConfigPool targeting any worker node
- It could therefore select nodes with NoSchedule/NoExecute taints
- In OPCT, selecting the dedicated node caused pod eviction and test
  failure

Solution:
- Use e2enode.GetReadySchedulableNodes() to filter nodes
- This function excludes nodes with NoSchedule/NoExecute taints
- This follows the same pattern used by other OpenShift conformance
  tests

Changes:
- Added import: e2enode "k8s.io/kubernetes/test/e2e/framework/node"
- Modified addWorkerNodesToCustomPool() to:
  1. Get schedulable nodes using e2enode.GetReadySchedulableNodes()
  2. Filter for worker nodes only
  3. Select from the schedulable workers (excluding tainted nodes)

Testing:
- Verified that the node filtering logic correctly excludes tainted
  nodes
- Tested on a cluster with a dedicated OPCT node
  (node-role.kubernetes.io/tests:NoSchedule)
- The dedicated node was correctly filtered out; the test selected from
  the three remaining schedulable workers

Fixes: https://github.com/redhat-openshift-ecosystem/opct/issues/[TBD]

Co-authored-by: Claude Sonnet 4.5
---
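Note (illustration only, not part of the commit): the selection behavior
this patch relies on can be reproduced with in-memory node objects. The
filterSchedulableWorkers function below is a hypothetical stand-in for
e2enode.GetReadySchedulableNodes() combined with the worker-label filter
added here; the real framework helper also checks node readiness
conditions, which this sketch omits.

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // filterSchedulableWorkers keeps nodes that carry the worker role label
    // and no NoSchedule/NoExecute taint (hypothetical stand-in only).
    func filterSchedulableWorkers(nodes []corev1.Node) []corev1.Node {
    	var workers []corev1.Node
    	for _, node := range nodes {
    		if _, ok := node.Labels["node-role.kubernetes.io/worker"]; !ok {
    			continue
    		}
    		tainted := false
    		for _, t := range node.Spec.Taints {
    			if t.Effect == corev1.TaintEffectNoSchedule || t.Effect == corev1.TaintEffectNoExecute {
    				tainted = true
    				break
    			}
    		}
    		if !tainted {
    			workers = append(workers, node)
    		}
    	}
    	return workers
    }

    func main() {
    	nodes := []corev1.Node{
    		{ObjectMeta: metav1.ObjectMeta{
    			Name:   "worker-0",
    			Labels: map[string]string{"node-role.kubernetes.io/worker": ""},
    		}},
    		// The dedicated OPCT node: labeled as a worker but tainted, so
    		// label-only selection could pick it and trigger pod eviction.
    		{
    			ObjectMeta: metav1.ObjectMeta{
    				Name:   "opct-dedicated",
    				Labels: map[string]string{"node-role.kubernetes.io/worker": ""},
    			},
    			Spec: corev1.NodeSpec{Taints: []corev1.Taint{{
    				Key:    "node-role.kubernetes.io/tests",
    				Effect: corev1.TaintEffectNoSchedule,
    			}}},
    		},
    	}
    	for _, n := range filterSchedulableWorkers(nodes) {
    		fmt.Println(n.Name) // prints only "worker-0"
    	}
    }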
 test/extended/machine_config/pinnedimages.go | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/test/extended/machine_config/pinnedimages.go b/test/extended/machine_config/pinnedimages.go
index 56b7fffb0e30..09c3b5c85600 100644
--- a/test/extended/machine_config/pinnedimages.go
+++ b/test/extended/machine_config/pinnedimages.go
@@ -21,6 +21,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	"sigs.k8s.io/yaml"
 )
 
@@ -353,24 +354,33 @@ func applyPIS(oc *exutil.CLI, pisFixture string, pis *mcfgv1.PinnedImageSet, pis
 // `addWorkerNodesToCustomPool` labels the desired number of worker nodes with the MCP role
 // selector so that the nodes become part of the desired custom MCP
 func addWorkerNodesToCustomPool(oc *exutil.CLI, kubeClient *kubernetes.Clientset, numberOfNodes int, customMCP string) ([]string, error) {
-	// Get the worker nodes
-	nodes, err := kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{"node-role.kubernetes.io/worker": ""}).String()})
+	// Get ready schedulable nodes (excludes nodes with NoSchedule/NoExecute taints)
+	nodes, err := e2enode.GetReadySchedulableNodes(context.TODO(), kubeClient)
 	if err != nil {
 		return nil, err
 	}
-	// Return an error if there are less worker nodes in the cluster than the desired number of nodes to add to the custom MCP
-	if len(nodes.Items) < numberOfNodes {
-		return nil, fmt.Errorf("Node in Worker MCP %d < Number of nodes needed in %d MCP", len(nodes.Items), numberOfNodes)
+
+	// Filter for worker nodes only
+	var workerNodes []corev1.Node
+	for _, node := range nodes.Items {
+		if _, hasWorkerLabel := node.Labels["node-role.kubernetes.io/worker"]; hasWorkerLabel {
+			workerNodes = append(workerNodes, node)
+		}
+	}
+
+	// Return an error if there are fewer schedulable worker nodes than the desired number of nodes to add to the custom MCP
+	if len(workerNodes) < numberOfNodes {
+		return nil, fmt.Errorf("Schedulable nodes in Worker MCP %d < Number of nodes needed in %s MCP %d", len(workerNodes), customMCP, numberOfNodes)
 	}
 
 	// Label the nodes with the custom MCP role selector
 	var optedNodes []string
 	for node_i := 0; node_i < numberOfNodes; node_i++ {
-		err = oc.AsAdmin().Run("label").Args("node", nodes.Items[node_i].Name, fmt.Sprintf("node-role.kubernetes.io/%s=", customMCP)).Execute()
+		err = oc.AsAdmin().Run("label").Args("node", workerNodes[node_i].Name, fmt.Sprintf("node-role.kubernetes.io/%s=", customMCP)).Execute()
 		if err != nil {
 			return nil, err
 		}
-		optedNodes = append(optedNodes, nodes.Items[node_i].Name)
+		optedNodes = append(optedNodes, workerNodes[node_i].Name)
 	}
 	return optedNodes, nil
 }

From 9b9fb62509bfa04cdbe7c6ace225a56ddd196442 Mon Sep 17 00:00:00 2001
From: Bikash Shaw
Date: Tue, 24 Mar 2026 10:25:04 +0530
Subject: [PATCH 2/2] Refactor: Extract schedulable worker node selection into
 a reusable utility function

As suggested in code review, create a shared utility function,
GetReadySchedulableWorkerNodes(), in test/extended/node/node_utils.go
that encapsulates the logic of retrieving schedulable worker nodes
(excluding nodes with NoSchedule/NoExecute taints). This makes the node
selection logic reusable across multiple tests and improves code
maintainability.

Changes:
- Added GetReadySchedulableWorkerNodes() in
  test/extended/node/node_utils.go
- Refactored the PinnedImages test to use the new utility function
- Simplified addWorkerNodesToCustomPool() by removing the manual
  filtering logic
---
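Note (illustration only, not part of the commit): a usage sketch for the
new helper from a hypothetical caller outside this test. pickWorkerForPIS
does not exist in the tree; it is shown only to demonstrate the intended
reuse.

    package example

    import (
    	"context"
    	"fmt"

    	nodeutil "github.com/openshift/origin/test/extended/node"
    	"k8s.io/client-go/kubernetes"
    )

    // pickWorkerForPIS selects a single untainted, ready worker node, the
    // way other tests are expected to consume the shared helper.
    func pickWorkerForPIS(ctx context.Context, client kubernetes.Interface) (string, error) {
    	workerNodes, err := nodeutil.GetReadySchedulableWorkerNodes(ctx, client)
    	if err != nil {
    		return "", err
    	}
    	if len(workerNodes) == 0 {
    		return "", fmt.Errorf("no schedulable worker nodes available")
    	}
    	return workerNodes[0].Name, nil
    }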
 test/extended/machine_config/pinnedimages.go | 14 +++-----------
 test/extended/node/node_utils.go             | 23 +++++++++++++++++++++++
 2 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/test/extended/machine_config/pinnedimages.go b/test/extended/machine_config/pinnedimages.go
index 09c3b5c85600..a356a3dac1ff 100644
--- a/test/extended/machine_config/pinnedimages.go
+++ b/test/extended/machine_config/pinnedimages.go
@@ -13,6 +13,7 @@ import (
 
 	mcfgv1 "github.com/openshift/api/machineconfiguration/v1"
 	mcClient "github.com/openshift/client-go/machineconfiguration/clientset/versioned"
+	nodeutil "github.com/openshift/origin/test/extended/node"
 	exutil "github.com/openshift/origin/test/extended/util"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -21,7 +22,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	"sigs.k8s.io/yaml"
 )
 
@@ -354,20 +354,12 @@ func applyPIS(oc *exutil.CLI, pisFixture string, pis *mcfgv1.PinnedImageSet, pis
 // `addWorkerNodesToCustomPool` labels the desired number of worker nodes with the MCP role
 // selector so that the nodes become part of the desired custom MCP
 func addWorkerNodesToCustomPool(oc *exutil.CLI, kubeClient *kubernetes.Clientset, numberOfNodes int, customMCP string) ([]string, error) {
-	// Get ready schedulable nodes (excludes nodes with NoSchedule/NoExecute taints)
-	nodes, err := e2enode.GetReadySchedulableNodes(context.TODO(), kubeClient)
+	// Get ready schedulable worker nodes (excludes nodes with NoSchedule/NoExecute taints)
+	workerNodes, err := nodeutil.GetReadySchedulableWorkerNodes(context.TODO(), kubeClient)
 	if err != nil {
 		return nil, err
 	}
 
-	// Filter for worker nodes only
-	var workerNodes []corev1.Node
-	for _, node := range nodes.Items {
-		if _, hasWorkerLabel := node.Labels["node-role.kubernetes.io/worker"]; hasWorkerLabel {
-			workerNodes = append(workerNodes, node)
-		}
-	}
-
 	// Return an error if there are fewer schedulable worker nodes than the desired number of nodes to add to the custom MCP
 	if len(workerNodes) < numberOfNodes {
 		return nil, fmt.Errorf("Schedulable nodes in Worker MCP %d < Number of nodes needed in %s MCP %d", len(workerNodes), customMCP, numberOfNodes)
diff --git a/test/extended/node/node_utils.go b/test/extended/node/node_utils.go
index 1fdffb6adb07..d6044cfd8456 100644
--- a/test/extended/node/node_utils.go
+++ b/test/extended/node/node_utils.go
@@ -15,8 +15,10 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
 	kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 
 	o "github.com/onsi/gomega"
 
@@ -656,3 +658,24 @@ func ensureDropInDirectoryExists(ctx context.Context, oc *exutil.CLI, dirPath st
 
 	return nil
 }
+
+// GetReadySchedulableWorkerNodes returns ready schedulable worker nodes.
+// It filters out nodes with NoSchedule/NoExecute taints and non-worker nodes,
+// making it suitable for tests that need to select worker nodes for workload placement.
+func GetReadySchedulableWorkerNodes(ctx context.Context, client kubernetes.Interface) ([]v1.Node, error) {
+	// Get ready schedulable nodes (excludes nodes with NoSchedule/NoExecute taints)
+	nodes, err := e2enode.GetReadySchedulableNodes(ctx, client)
+	if err != nil {
+		return nil, err
+	}
+
+	// Filter for worker nodes only
+	var workerNodes []v1.Node
+	for _, node := range nodes.Items {
+		if _, hasWorkerLabel := node.Labels["node-role.kubernetes.io/worker"]; hasWorkerLabel {
+			workerNodes = append(workerNodes, node)
+		}
+	}
+
+	return workerNodes, nil
+}
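Note for follow-up adopters (illustration only, not part of either
commit): migrating another test to the shared helper is mechanical. A
before/after sketch; listWorkersByLabel mirrors the label-only pattern
removed from the PinnedImages test in PATCH 1/2, and both function names
here are hypothetical.

    package example

    import (
    	"context"

    	nodeutil "github.com/openshift/origin/test/extended/node"
    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/labels"
    	"k8s.io/client-go/kubernetes"
    )

    // listWorkersByLabel is the old pattern: label-only selection, which
    // can return tainted nodes such as a dedicated OPCT node.
    func listWorkersByLabel(kubeClient kubernetes.Interface) ([]corev1.Node, error) {
    	nodes, err := kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
    		LabelSelector: labels.SelectorFromSet(labels.Set{"node-role.kubernetes.io/worker": ""}).String(),
    	})
    	if err != nil {
    		return nil, err
    	}
    	return nodes.Items, nil
    }

    // listSchedulableWorkers is the replacement pattern: taint-aware
    // selection through the helper introduced in this series.
    func listSchedulableWorkers(kubeClient kubernetes.Interface) ([]corev1.Node, error) {
    	return nodeutil.GetReadySchedulableWorkerNodes(context.TODO(), kubeClient)
    }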