-
Notifications
You must be signed in to change notification settings - Fork 90
OADP-7235: OLMv1 lifecycle tests + OLMv0→OLMv1 migration target #2160
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: oadp-dev
Are you sure you want to change the base?
Changes from all commits
2b88073
76f9c16
ac16803
565e178
8285756
5234080
f9cfb45
9de0e76
d30d922
da78e23
c8ccff9
87a3e6e
4cab1a0
929711e
8d1fb17
3cc3733
698fcc2
8cd05a8
05d8104
04564d0
ba5487d
16fb09e
c62bcbd
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Large diffs are not rendered by default.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1 @@ | ||
| tmp/ |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,228 @@ | ||
| package olmv1_test | ||
|
|
||
| import ( | ||
| "context" | ||
| "fmt" | ||
| "log" | ||
| "time" | ||
|
|
||
| "github.com/onsi/ginkgo/v2" | ||
| "github.com/onsi/gomega" | ||
| corev1 "k8s.io/api/core/v1" | ||
| apierrors "k8s.io/apimachinery/pkg/api/errors" | ||
| metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
| "k8s.io/apimachinery/pkg/types" | ||
| ) | ||
|
|
||
| const ( | ||
| oadpCRDName = "dataprotectionapplications.oadp.openshift.io" | ||
| veleroCRDName = "backups.velero.io" | ||
| restoreCRDName = "restores.velero.io" | ||
|
kaovilai marked this conversation as resolved.
|
||
|
|
||
| managerLabelSelector = "control-plane=controller-manager" | ||
| ) | ||
|
|
||
| var _ = ginkgo.Describe("OADP OLMv1 lifecycle", ginkgo.Ordered, ginkgo.Label("olmv1"), func() { | ||
| ctx := context.Background() | ||
|
|
||
| ginkgo.BeforeAll(func() { | ||
| skipIfOLMv1NotAvailable(ctx) | ||
|
|
||
| ginkgo.By("Cleaning up orphaned OADP/Velero CRDs from previous installs") | ||
| cleanupOrphanedCRDs(ctx) | ||
|
|
||
| ginkgo.By("Setting up namespace, ServiceAccount, and RBAC") | ||
| ensureNamespace(ctx, namespace) | ||
| ensureServiceAccount(ctx, serviceAccountName, namespace) | ||
| ensureClusterAdminBinding(ctx, serviceAccountName, namespace) | ||
|
|
||
| if catalogImage != "" { | ||
| ginkgo.By(fmt.Sprintf("Creating ClusterCatalog %s from image %s", catalogName, catalogImage)) | ||
| ensureClusterCatalog(ctx, catalogName, catalogImage) | ||
| waitForClusterCatalogServing(ctx, catalogName) | ||
| } | ||
| }) | ||
|
|
||
| ginkgo.AfterAll(func() { | ||
| ginkgo.By("Cleaning up OLMv1 test resources") | ||
| err := deleteClusterExtension(ctx, packageName) | ||
| if err != nil { | ||
| log.Printf("Warning: failed to delete ClusterExtension: %v", err) | ||
| } | ||
|
|
||
| gomega.Eventually(func() bool { | ||
| _, err := getClusterExtension(ctx, packageName) | ||
| return apierrors.IsNotFound(err) | ||
| }, 3*time.Minute, 5*time.Second).Should(gomega.BeTrue(), "ClusterExtension should be deleted") | ||
|
|
||
| if createdCatalog { | ||
| ginkgo.By(fmt.Sprintf("Deleting ClusterCatalog %s", catalogName)) | ||
| deleteClusterCatalog(ctx, catalogName) | ||
| } | ||
|
|
||
| cleanupClusterRoleBinding(ctx, serviceAccountName) | ||
| }) | ||
|
|
||
| ginkgo.It("should install OADP operator via ClusterExtension", func() { | ||
| ginkgo.By("Cleaning up any existing ClusterExtension from previous runs") | ||
| _ = deleteClusterExtension(ctx, packageName) | ||
|
|
||
| ginkgo.By("Creating the ClusterExtension") | ||
| ce := buildClusterExtension(packageName, packageName, namespace, serviceAccountName) | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

In the fresh install test (olmv1_install_test.go), the ClusterExtension is created without a catalog selector when catalogImage is empty. But in the migration test, a selector is added:

if catalogImage != "" {
    catalogSpec["selector"] = map[string]interface{}{
        "matchLabels": map[string]interface{}{
            "olm.operatorframework.io/metadata.name": catalogName,
        },
    }
}

On a cluster with default catalogs (like openshift-community-operators), the community OADP package could be resolved instead of the test build. The PR's own design notes call this out explicitly. But the install test doesn't protect against it the way the migration test does (which uses withCatalogSelector). This might be intentional, since test-olmv1 always builds and pushes a catalog image, so catalogImage is always set. But if someone runs the test manually against a productized catalog without setting catalogImage, they could get unexpected resolution. |
||
| _, err := dynamicClient.Resource(clusterExtensionGVR).Create(ctx, ce, metav1.CreateOptions{}) | ||
|
kaovilai marked this conversation as resolved.
|
||
| gomega.Expect(err).NotTo(gomega.HaveOccurred()) | ||
| log.Printf("Created ClusterExtension %s (package=%s, namespace=%s)", packageName, packageName, namespace) | ||
|
|
||
| ginkgo.By("Waiting for ClusterExtension to be installed") | ||
| terminalReasons := map[string]bool{ | ||
| "InvalidConfiguration": true, | ||
| "Failed": true, | ||
| } | ||
| gomega.Eventually(func(g gomega.Gomega) { | ||
| obj, err := getClusterExtension(ctx, packageName) | ||
| g.Expect(err).NotTo(gomega.HaveOccurred(), "ClusterExtension should exist") | ||
|
|
||
| logAllConditions(obj) | ||
|
|
||
| progCond, progFound := getCondition(obj, "Progressing") | ||
| if progFound { | ||
| reason, _ := progCond["reason"].(string) | ||
| message, _ := progCond["message"].(string) | ||
| g.Expect(terminalReasons[reason]).NotTo(gomega.BeTrue(), | ||
| "ClusterExtension has terminal error on Progressing: reason=%s message=%s", reason, message) | ||
| } | ||
|
|
||
| instCond, instFound := getCondition(obj, "Installed") | ||
| g.Expect(instFound).To(gomega.BeTrue(), "Installed condition should be present") | ||
| status, _ := instCond["status"].(string) | ||
| g.Expect(status).To(gomega.Equal("True"), "Installed condition should be True") | ||
| }, 10*time.Minute, 10*time.Second).Should(gomega.Succeed()) | ||
|
|
||
| ginkgo.By("Checking installed bundle info") | ||
| obj, err := getClusterExtension(ctx, packageName) | ||
| gomega.Expect(err).NotTo(gomega.HaveOccurred()) | ||
| bundleName, bundleVersion, found := getInstalledBundle(obj) | ||
| gomega.Expect(found).To(gomega.BeTrue(), "installed bundle should be present in status") | ||
| log.Printf("Installed bundle: name=%s version=%s", bundleName, bundleVersion) | ||
| }) | ||
|
|
||
| ginkgo.It("should have the OADP controller-manager pod running", func() { | ||
| ginkgo.By("Waiting for controller-manager pod to be Running") | ||
| gomega.Eventually(func() (bool, error) { | ||
| pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ | ||
| LabelSelector: managerLabelSelector, | ||
| }) | ||
| if err != nil { | ||
| return false, err | ||
| } | ||
| for _, pod := range pods.Items { | ||
| if pod.Status.Phase == corev1.PodRunning { | ||
| log.Printf("Controller-manager pod %s is Running", pod.Name) | ||
| return true, nil | ||
| } | ||
| log.Printf("Controller-manager pod %s phase: %s", pod.Name, pod.Status.Phase) | ||
| } | ||
| return false, nil | ||
| }, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue(), "controller-manager pod should be Running") | ||
|
weshayutin marked this conversation as resolved.
|
||
| }) | ||
|
|
||
| ginkgo.It("should have OADP CRDs installed", func() { | ||
| expectedCRDs := []string{ | ||
|
kaovilai marked this conversation as resolved.
|
||
| oadpCRDName, | ||
| veleroCRDName, | ||
| restoreCRDName, | ||
| "schedules.velero.io", | ||
| "backupstoragelocations.velero.io", | ||
| "volumesnapshotlocations.velero.io", | ||
| } | ||
|
|
||
| for _, crdName := range expectedCRDs { | ||
| ginkgo.By(fmt.Sprintf("Checking CRD %s exists", crdName)) | ||
| exists, err := crdExists(ctx, crdName) | ||
| gomega.Expect(err).NotTo(gomega.HaveOccurred()) | ||
| gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("CRD %s should exist", crdName)) | ||
| log.Printf("CRD %s exists", crdName) | ||
| } | ||
| }) | ||
|
|
||
| ginkgo.It("should not report deprecation warnings", func() { | ||
| obj, err := getClusterExtension(ctx, packageName) | ||
| gomega.Expect(err).NotTo(gomega.HaveOccurred()) | ||
|
|
||
| for _, condType := range []string{"Deprecated", "PackageDeprecated", "ChannelDeprecated", "BundleDeprecated"} { | ||
| cond, found := getCondition(obj, condType) | ||
| if found { | ||
| status, _ := cond["status"].(string) | ||
| gomega.Expect(status).To(gomega.Equal("False"), | ||
| fmt.Sprintf("%s condition should be False, got %s", condType, status)) | ||
| } | ||
| } | ||
| }) | ||
|
|
||
| ginkgo.When("upgrading the operator", func() { | ||
| ginkgo.BeforeAll(func() { | ||
| if upgradeVersion == "" { | ||
| ginkgo.Skip("No --upgrade-version specified, skipping upgrade tests") | ||
| } | ||
| }) | ||
|
|
||
| ginkgo.It("should upgrade the ClusterExtension to the target version", func() { | ||
| ginkgo.By(fmt.Sprintf("Patching ClusterExtension version to %s", upgradeVersion)) | ||
| obj, err := getClusterExtension(ctx, packageName) | ||
| gomega.Expect(err).NotTo(gomega.HaveOccurred()) | ||
|
|
||
| previousBundleName, previousVersion, _ := getInstalledBundle(obj) | ||
| log.Printf("Current installed bundle: name=%s version=%s", previousBundleName, previousVersion) | ||
|
|
||
| patch := []byte(fmt.Sprintf(`{"spec":{"source":{"catalog":{"version":"%s","upgradeConstraintPolicy":"SelfCertified"}}}}`, upgradeVersion)) | ||
| _, err = dynamicClient.Resource(clusterExtensionGVR).Patch(ctx, packageName, types.MergePatchType, patch, metav1.PatchOptions{}) | ||
| gomega.Expect(err).NotTo(gomega.HaveOccurred()) | ||
| log.Printf("Patched ClusterExtension version to %s", upgradeVersion) | ||
|
|
||
| ginkgo.By("Waiting for upgrade to complete") | ||
| gomega.Eventually(func() string { | ||
| updated, err := getClusterExtension(ctx, packageName) | ||
| if err != nil { | ||
| return "" | ||
| } | ||
|
|
||
| cond, found := getCondition(updated, "Installed") | ||
| if !found { | ||
| return "" | ||
| } | ||
| status, _ := cond["status"].(string) | ||
| if status != "True" { | ||
| reason, _ := cond["reason"].(string) | ||
| message, _ := cond["message"].(string) | ||
| log.Printf("Installed condition: status=%s reason=%s message=%s", status, reason, message) | ||
| return "" | ||
| } | ||
|
|
||
| _, bundleVer, found := getInstalledBundle(updated) | ||
| if !found { | ||
| return "" | ||
| } | ||
| log.Printf("Installed bundle version: %s", bundleVer) | ||
| return bundleVer | ||
| }, 10*time.Minute, 10*time.Second).ShouldNot(gomega.Equal(previousVersion), | ||
| "Installed bundle version should change after upgrade") | ||
|
|
||
| ginkgo.By("Verifying controller-manager pod is running after upgrade") | ||
| gomega.Eventually(func() (bool, error) { | ||
| pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ | ||
| LabelSelector: managerLabelSelector, | ||
| }) | ||
| if err != nil { | ||
| return false, err | ||
| } | ||
| for _, pod := range pods.Items { | ||
| if pod.Status.Phase == corev1.PodRunning { | ||
| return true, nil | ||
| } | ||
| } | ||
| return false, nil | ||
| }, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue()) | ||
| }) | ||
| }) | ||
| }) | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Enabling SingleNamespace is needed for OLMv1 compatibility, makes sense. One thing to be aware of: this also changes OLMv0 behavior. Previously OLM would reject an OperatorGroup that targets a namespace different from the install namespace. With this change, that configuration is now allowed.
OADP assumes the operator pod runs in the same namespace it watches (Velero deployment, secrets, SCC management all target WATCH_NAMESPACE). If someone creates an OperatorGroup with a divergent targetNamespaces, things would likely break.
Low risk since nobody does that accidentally, and the OLMv1 flow always sets watchNamespace equal to the install namespace. But worth documenting in the CSV or release notes that SingleNamespace is supported only when the target namespace matches the install namespace.