Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
2b88073
dnm: test olmv1 deployment
weshayutin Apr 10, 2026
76f9c16
fix unit tests
weshayutin Apr 13, 2026
ac16803
update lint
weshayutin Apr 13, 2026
565e178
Enhance OLMv1 migration support: update service account naming, add m…
kaovilai May 5, 2026
8285756
Add OLMv0 remnant cleanup and CatalogSource migration to OLMv1 tests
kaovilai May 7, 2026
5234080
Add olm.managed=true cleanup to Makefile migration target
kaovilai May 7, 2026
f9cfb45
Add CI compatibility, version verification, and docs for OLMv1 migrat…
kaovilai May 7, 2026
9de0e76
Add generated OLMv1 manifest to .gitignore
kaovilai May 7, 2026
d30d922
Address review feedback: fix manifest schema, scope cleanup, add mirr…
kaovilai May 7, 2026
da78e23
fix: replace xargs -r with portable POSIX alternative in Makefile
kaovilai May 11, 2026
c8ccff9
fix: handle List errors in Eventually closures to prevent nil deref
kaovilai May 11, 2026
87a3e6e
fix: log List errors in OLMv0 remnant cleanup instead of discarding
kaovilai May 11, 2026
4cab1a0
fix: set createdCatalog flag when existing ClusterCatalog matches image
kaovilai May 11, 2026
929711e
fix: wait for CRD deletion to complete before proceeding
kaovilai May 11, 2026
8d1fb17
fix: delete existing ClusterExtension before Create for rerun safety
kaovilai May 11, 2026
3cc3733
fix: pin catalog selector in migration path when catalog is auto-dete…
kaovilai May 11, 2026
698fcc2
fix: log when ClusterCatalog has no conditions yet during wait
kaovilai May 12, 2026
8cd05a8
fix: default OLMv1 test targets to ttl.sh with auto-build
kaovilai May 12, 2026
05d8104
fix: default OLMV1_VERSION to VERSION (99.0.0)
kaovilai May 12, 2026
04564d0
fix: echo OLMv1 test configuration before running
kaovilai May 12, 2026
ba5487d
fix: skip OLMv1 tests when cluster lacks prerequisites
kaovilai May 13, 2026
16fb09e
fix: check NewOLMOwnSingleNamespace feature gate before running tests
kaovilai May 13, 2026
c62bcbd
fix: support CustomNoUpgrade as alternative to TechPreviewNoUpgrade
kaovilai May 14, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ go.work
*~

# OADP
# Generated by `make generate-olmv1-manifest` — regenerated each run with current config
oadp-olmv1-manifest.yaml
tests/e2e/e2e.test
tests/e2e/templates/*.yaml
.DS_Store
Expand Down
1 change: 1 addition & 0 deletions .golangci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,7 @@ linters:
- third_party$
- builtin$
- examples$
- tests/olmv1
issues:
max-issues-per-linter: 0
max-same-issues: 0
Expand Down
328 changes: 326 additions & 2 deletions Makefile

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion bundle/manifests/oadp-operator.clusterserviceversion.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1639,7 +1639,7 @@ spec:
installModes:
- supported: true
type: OwnNamespace
- supported: false
- supported: true
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Enabling SingleNamespace is needed for OLMv1 compatibility, which makes sense. One thing to be aware of: this also changes OLMv0 behavior. Previously, OLM would reject an OperatorGroup that targets a namespace different from the install namespace. With this change, that configuration is now allowed.

OADP assumes the operator pod runs in the same namespace it watches (Velero deployment, secrets, SCC management all target WATCH_NAMESPACE). If someone creates an OperatorGroup with a divergent targetNamespaces, things would likely break.

Low risk since nobody does that accidentally, and the OLMv1 flow always sets watchNamespace equal to the install namespace. But worth documenting in the CSV or release notes that SingleNamespace is supported only when the target namespace matches the install namespace.

type: SingleNamespace
- supported: false
type: MultiNamespace
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -467,7 +467,7 @@ spec:
installModes:
- supported: true
type: OwnNamespace
- supported: false
- supported: true
type: SingleNamespace
- supported: false
type: MultiNamespace
Expand Down
1 change: 1 addition & 0 deletions tests/olmv1/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
tmp/
228 changes: 228 additions & 0 deletions tests/olmv1/olmv1_install_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,228 @@
package olmv1_test

import (
"context"
"fmt"
"log"
"time"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)

const (
	// CRDs that a successful OADP install is expected to create.
	oadpCRDName    = "dataprotectionapplications.oadp.openshift.io"
	veleroCRDName  = "backups.velero.io"
	restoreCRDName = "restores.velero.io"

	// Label selector that identifies the OADP controller-manager pod.
	managerLabelSelector = "control-plane=controller-manager"
)

var _ = ginkgo.Describe("OADP OLMv1 lifecycle", ginkgo.Ordered, ginkgo.Label("olmv1"), func() {
ctx := context.Background()

ginkgo.BeforeAll(func() {
skipIfOLMv1NotAvailable(ctx)

ginkgo.By("Cleaning up orphaned OADP/Velero CRDs from previous installs")
cleanupOrphanedCRDs(ctx)

ginkgo.By("Setting up namespace, ServiceAccount, and RBAC")
ensureNamespace(ctx, namespace)
ensureServiceAccount(ctx, serviceAccountName, namespace)
ensureClusterAdminBinding(ctx, serviceAccountName, namespace)

if catalogImage != "" {
ginkgo.By(fmt.Sprintf("Creating ClusterCatalog %s from image %s", catalogName, catalogImage))
ensureClusterCatalog(ctx, catalogName, catalogImage)
waitForClusterCatalogServing(ctx, catalogName)
}
})

ginkgo.AfterAll(func() {
ginkgo.By("Cleaning up OLMv1 test resources")
err := deleteClusterExtension(ctx, packageName)
if err != nil {
log.Printf("Warning: failed to delete ClusterExtension: %v", err)
}

gomega.Eventually(func() bool {
_, err := getClusterExtension(ctx, packageName)
return apierrors.IsNotFound(err)
}, 3*time.Minute, 5*time.Second).Should(gomega.BeTrue(), "ClusterExtension should be deleted")

if createdCatalog {
ginkgo.By(fmt.Sprintf("Deleting ClusterCatalog %s", catalogName))
deleteClusterCatalog(ctx, catalogName)
}

cleanupClusterRoleBinding(ctx, serviceAccountName)
})

ginkgo.It("should install OADP operator via ClusterExtension", func() {
ginkgo.By("Cleaning up any existing ClusterExtension from previous runs")
_ = deleteClusterExtension(ctx, packageName)

ginkgo.By("Creating the ClusterExtension")
ce := buildClusterExtension(packageName, packageName, namespace, serviceAccountName)
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In the fresh install test (olmv1_install_test.go), the ClusterExtension is created without a catalog selector when catalogImage is empty:

ce := buildClusterExtension(packageName, packageName, namespace, serviceAccountName)

But in buildClusterExtension, the catalog selector is only added when catalogImage != "":

  if catalogImage != "" {
      catalogSpec["selector"] = map[string]interface{}{
          "matchLabels": map[string]interface{}{
              "olm.operatorframework.io/metadata.name": catalogName,
          },
      }
  }

On a cluster with default catalogs (like openshift-community-operators), the community OADP package could be resolved instead of the test build. The PR's own design notes call this out explicitly. But the install test doesn't protect against it the way the migration test does (which uses withCatalogSelector).

This might be intentional since test-olmv1 always builds and pushes a catalog image, so catalogImage is always set. But if someone runs the test manually against a productized catalog without setting catalogImage, they could get unexpected resolution.

_, err := dynamicClient.Resource(clusterExtensionGVR).Create(ctx, ce, metav1.CreateOptions{})
Comment thread
kaovilai marked this conversation as resolved.
gomega.Expect(err).NotTo(gomega.HaveOccurred())
log.Printf("Created ClusterExtension %s (package=%s, namespace=%s)", packageName, packageName, namespace)

ginkgo.By("Waiting for ClusterExtension to be installed")
terminalReasons := map[string]bool{
"InvalidConfiguration": true,
"Failed": true,
}
gomega.Eventually(func(g gomega.Gomega) {
obj, err := getClusterExtension(ctx, packageName)
g.Expect(err).NotTo(gomega.HaveOccurred(), "ClusterExtension should exist")

logAllConditions(obj)

progCond, progFound := getCondition(obj, "Progressing")
if progFound {
reason, _ := progCond["reason"].(string)
message, _ := progCond["message"].(string)
g.Expect(terminalReasons[reason]).NotTo(gomega.BeTrue(),
"ClusterExtension has terminal error on Progressing: reason=%s message=%s", reason, message)
}

instCond, instFound := getCondition(obj, "Installed")
g.Expect(instFound).To(gomega.BeTrue(), "Installed condition should be present")
status, _ := instCond["status"].(string)
g.Expect(status).To(gomega.Equal("True"), "Installed condition should be True")
}, 10*time.Minute, 10*time.Second).Should(gomega.Succeed())

ginkgo.By("Checking installed bundle info")
obj, err := getClusterExtension(ctx, packageName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
bundleName, bundleVersion, found := getInstalledBundle(obj)
gomega.Expect(found).To(gomega.BeTrue(), "installed bundle should be present in status")
log.Printf("Installed bundle: name=%s version=%s", bundleName, bundleVersion)
})

ginkgo.It("should have the OADP controller-manager pod running", func() {
ginkgo.By("Waiting for controller-manager pod to be Running")
gomega.Eventually(func() (bool, error) {
pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: managerLabelSelector,
})
if err != nil {
return false, err
}
for _, pod := range pods.Items {
if pod.Status.Phase == corev1.PodRunning {
log.Printf("Controller-manager pod %s is Running", pod.Name)
return true, nil
}
log.Printf("Controller-manager pod %s phase: %s", pod.Name, pod.Status.Phase)
}
return false, nil
}, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue(), "controller-manager pod should be Running")
Comment thread
weshayutin marked this conversation as resolved.
})

ginkgo.It("should have OADP CRDs installed", func() {
expectedCRDs := []string{
Comment thread
kaovilai marked this conversation as resolved.
oadpCRDName,
veleroCRDName,
restoreCRDName,
"schedules.velero.io",
"backupstoragelocations.velero.io",
"volumesnapshotlocations.velero.io",
}

for _, crdName := range expectedCRDs {
ginkgo.By(fmt.Sprintf("Checking CRD %s exists", crdName))
exists, err := crdExists(ctx, crdName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("CRD %s should exist", crdName))
log.Printf("CRD %s exists", crdName)
}
})

ginkgo.It("should not report deprecation warnings", func() {
obj, err := getClusterExtension(ctx, packageName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

for _, condType := range []string{"Deprecated", "PackageDeprecated", "ChannelDeprecated", "BundleDeprecated"} {
cond, found := getCondition(obj, condType)
if found {
status, _ := cond["status"].(string)
gomega.Expect(status).To(gomega.Equal("False"),
fmt.Sprintf("%s condition should be False, got %s", condType, status))
}
}
})

ginkgo.When("upgrading the operator", func() {
ginkgo.BeforeAll(func() {
if upgradeVersion == "" {
ginkgo.Skip("No --upgrade-version specified, skipping upgrade tests")
}
})

ginkgo.It("should upgrade the ClusterExtension to the target version", func() {
ginkgo.By(fmt.Sprintf("Patching ClusterExtension version to %s", upgradeVersion))
obj, err := getClusterExtension(ctx, packageName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

previousBundleName, previousVersion, _ := getInstalledBundle(obj)
log.Printf("Current installed bundle: name=%s version=%s", previousBundleName, previousVersion)

patch := []byte(fmt.Sprintf(`{"spec":{"source":{"catalog":{"version":"%s","upgradeConstraintPolicy":"SelfCertified"}}}}`, upgradeVersion))
_, err = dynamicClient.Resource(clusterExtensionGVR).Patch(ctx, packageName, types.MergePatchType, patch, metav1.PatchOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
log.Printf("Patched ClusterExtension version to %s", upgradeVersion)

ginkgo.By("Waiting for upgrade to complete")
gomega.Eventually(func() string {
updated, err := getClusterExtension(ctx, packageName)
if err != nil {
return ""
}

cond, found := getCondition(updated, "Installed")
if !found {
return ""
}
status, _ := cond["status"].(string)
if status != "True" {
reason, _ := cond["reason"].(string)
message, _ := cond["message"].(string)
log.Printf("Installed condition: status=%s reason=%s message=%s", status, reason, message)
return ""
}

_, bundleVer, found := getInstalledBundle(updated)
if !found {
return ""
}
log.Printf("Installed bundle version: %s", bundleVer)
return bundleVer
}, 10*time.Minute, 10*time.Second).ShouldNot(gomega.Equal(previousVersion),
"Installed bundle version should change after upgrade")

ginkgo.By("Verifying controller-manager pod is running after upgrade")
gomega.Eventually(func() (bool, error) {
pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: managerLabelSelector,
})
if err != nil {
return false, err
}
for _, pod := range pods.Items {
if pod.Status.Phase == corev1.PodRunning {
return true, nil
}
}
return false, nil
}, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue())
})
})
})

Loading