diff --git a/.github/workflows/cla-check.yml b/.github/workflows/cla-check.yml
deleted file mode 100644
index 336080214..000000000
--- a/.github/workflows/cla-check.yml
+++ /dev/null
@@ -1,54 +0,0 @@
-name: "Agreements"
-
-permissions:
- contents: write
- pull-requests: write
-
-on:
- issue_comment:
- types: [ created ]
- pull_request_target:
- types: [ opened, closed, synchronize ]
-
-jobs:
- ContributorLicenseAgreement:
- runs-on: ubuntu-latest
- steps:
- - name: "CLA Assistant"
- if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target'
- uses: cla-assistant/github-action@v2.1.3-beta
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
- with:
- path-to-signatures: "signatures/version1/cla.json"
- path-to-document: "https://github.com/splunk/cla-agreement/blob/main/CLA.md"
- branch: "main"
- allowlist: dependabot[bot]
- remote-organization-name: splunk
- remote-repository-name: cla-agreement
- custom-notsigned-prcomment: "
Thank you for your submission, we really appreciate it. Like many open-source projects, we ask that you sign our [Contribution License Agreement](${input.getPathToDocument()}) before we can accept your contribution. You can sign the CLA by just posting a Pull Request Comment with the exact sentence copied from below.
"
- custom-allsigned-prcomment: "⏳ **CLA signed** — now checking Code of Conduct status..."
- CodeOfConduct:
- runs-on: ubuntu-latest
- # CLA and COC jobs both edit the same PR comment to show signing status.
- # Run sequentially to avoid race conditions when updating the comment.
- needs: ContributorLicenseAgreement
- steps:
- - name: "COC Assistant"
- if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the Code of Conduct and I hereby sign the COC') || github.event_name == 'pull_request_target'
- uses: cla-assistant/github-action@v2.1.3-beta
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
- with:
- path-to-signatures: "signatures/version1/coc.json"
- path-to-document: "https://github.com/splunk/cla-agreement/blob/main/CODE_OF_CONDUCT.md"
- branch: "main"
- allowlist: dependabot[bot]
- remote-organization-name: splunk
- remote-repository-name: cla-agreement
- custom-pr-sign-comment: "I have read the Code of Conduct and I hereby sign the COC"
- signed-commit-message: "$contributorName has signed the COC in #$pullRequestNo"
- custom-notsigned-prcomment: "
🎉 **CLA signed — one more step to go!**
Please also accept our [Code of Conduct](${input.getPathToDocument()}) by posting a comment with the exact sentence copied from below. This helps us maintain a welcoming community.
"
- custom-allsigned-prcomment: "All contributors have signed required documents ✍️ ✅"
\ No newline at end of file
diff --git a/internal/controller/clustermaster_controller_test.go b/internal/controller/clustermaster_controller_test.go
index 5c5de2584..127c478df 100644
--- a/internal/controller/clustermaster_controller_test.go
+++ b/internal/controller/clustermaster_controller_test.go
@@ -2,14 +2,11 @@ package controller
import (
"context"
- "fmt"
"github.com/splunk/splunk-operator/internal/controller/testutils"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
- "time"
-
enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -25,10 +22,6 @@ import (
var _ = Describe("ClusterMaster Controller", func() {
- BeforeEach(func() {
- time.Sleep(2 * time.Second)
- })
-
AfterEach(func() {
})
@@ -151,20 +144,16 @@ func CreateClusterMaster(name string, namespace string, annotations map[string]s
}
ssSpec := testutils.NewClusterMaster(name, namespace, "image")
Expect(k8sClient.Create(context.Background(), ssSpec)).Should(Succeed())
- time.Sleep(2 * time.Second)
By("Expecting ClusterMaster custom resource to be created successfully")
ss := &enterpriseApiV3.ClusterMaster{}
Eventually(func() bool {
- _ = k8sClient.Get(context.Background(), key, ss)
- if status != "" {
- fmt.Printf("status is set to %v", status)
- ss.Status.Phase = status
- Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
- time.Sleep(2 * time.Second)
- }
- return true
+ return k8sClient.Get(context.Background(), key, ss) == nil
}, timeout, interval).Should(BeTrue())
+ if status != "" {
+ ss.Status.Phase = status
+ Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
+ }
return ss
}
@@ -178,20 +167,16 @@ func UpdateClusterMaster(instance *enterpriseApiV3.ClusterMaster, status enterpr
ssSpec := testutils.NewClusterMaster(instance.Name, instance.Namespace, "image")
ssSpec.ResourceVersion = instance.ResourceVersion
Expect(k8sClient.Update(context.Background(), ssSpec)).Should(Succeed())
- time.Sleep(2 * time.Second)
- By("Expecting ClusterMaster custom resource to be created successfully")
+ By("Expecting ClusterMaster custom resource to be updated successfully")
ss := &enterpriseApiV3.ClusterMaster{}
Eventually(func() bool {
- _ = k8sClient.Get(context.Background(), key, ss)
- if status != "" {
- fmt.Printf("status is set to %v", status)
- ss.Status.Phase = status
- Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
- time.Sleep(2 * time.Second)
- }
- return true
+ return k8sClient.Get(context.Background(), key, ss) == nil
}, timeout, interval).Should(BeTrue())
+ if status != "" {
+ ss.Status.Phase = status
+ Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
+ }
return ss
}
diff --git a/internal/controller/licensemanager_controller_test.go b/internal/controller/licensemanager_controller_test.go
index 4d95d6b5f..734e2f932 100644
--- a/internal/controller/licensemanager_controller_test.go
+++ b/internal/controller/licensemanager_controller_test.go
@@ -2,15 +2,12 @@ package controller
import (
"context"
- "fmt"
"github.com/splunk/splunk-operator/internal/controller/testutils"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- "time"
-
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -24,10 +21,6 @@ import (
var _ = Describe("LicenseManager Controller", func() {
- BeforeEach(func() {
- time.Sleep(2 * time.Second)
- })
-
AfterEach(func() {
})
@@ -148,20 +141,16 @@ func CreateLicenseManager(name string, namespace string, annotations map[string]
}
ssSpec := testutils.NewLicenseManager(name, namespace, "image")
Expect(k8sClient.Create(context.Background(), ssSpec)).Should(Succeed())
- time.Sleep(2 * time.Second)
By("Expecting LicenseManager custom resource to be created successfully")
ss := &enterpriseApi.LicenseManager{}
Eventually(func() bool {
- _ = k8sClient.Get(context.Background(), key, ss)
- if status != "" {
- fmt.Printf("status is set to %v", status)
- ss.Status.Phase = status
- Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
- time.Sleep(2 * time.Second)
- }
- return true
+ return k8sClient.Get(context.Background(), key, ss) == nil
}, timeout, interval).Should(BeTrue())
+ if status != "" {
+ ss.Status.Phase = status
+ Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
+ }
return ss
}
@@ -175,20 +164,16 @@ func UpdateLicenseManager(instance *enterpriseApi.LicenseManager, status enterpr
ssSpec := testutils.NewLicenseManager(instance.Name, instance.Namespace, "image")
ssSpec.ResourceVersion = instance.ResourceVersion
Expect(k8sClient.Update(context.Background(), ssSpec)).Should(Succeed())
- time.Sleep(2 * time.Second)
- By("Expecting LicenseManager custom resource to be created successfully")
+ By("Expecting LicenseManager custom resource to be updated successfully")
ss := &enterpriseApi.LicenseManager{}
Eventually(func() bool {
- _ = k8sClient.Get(context.Background(), key, ss)
- if status != "" {
- fmt.Printf("status is set to %v", status)
- ss.Status.Phase = status
- Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
- time.Sleep(2 * time.Second)
- }
- return true
+ return k8sClient.Get(context.Background(), key, ss) == nil
}, timeout, interval).Should(BeTrue())
+ if status != "" {
+ ss.Status.Phase = status
+ Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
+ }
return ss
}
diff --git a/internal/controller/licensemaster_controller_test.go b/internal/controller/licensemaster_controller_test.go
index fdd967aa3..214a0710c 100644
--- a/internal/controller/licensemaster_controller_test.go
+++ b/internal/controller/licensemaster_controller_test.go
@@ -2,15 +2,12 @@ package controller
import (
"context"
- "fmt"
"github.com/splunk/splunk-operator/internal/controller/testutils"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- "time"
-
enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -25,10 +22,6 @@ import (
var _ = Describe("LicenseMaster Controller", func() {
- BeforeEach(func() {
- time.Sleep(2 * time.Second)
- })
-
AfterEach(func() {
})
@@ -149,20 +142,16 @@ func CreateLicenseMaster(name string, namespace string, annotations map[string]s
}
ssSpec := testutils.NewLicenseMaster(name, namespace, "image")
Expect(k8sClient.Create(context.Background(), ssSpec)).Should(Succeed())
- time.Sleep(2 * time.Second)
By("Expecting LicenseMaster custom resource to be created successfully")
ss := &enterpriseApiV3.LicenseMaster{}
Eventually(func() bool {
- _ = k8sClient.Get(context.Background(), key, ss)
- if status != "" {
- fmt.Printf("status is set to %v", status)
- ss.Status.Phase = status
- Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
- time.Sleep(2 * time.Second)
- }
- return true
+ return k8sClient.Get(context.Background(), key, ss) == nil
}, timeout, interval).Should(BeTrue())
+ if status != "" {
+ ss.Status.Phase = status
+ Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
+ }
return ss
}
@@ -176,20 +165,16 @@ func UpdateLicenseMaster(instance *enterpriseApiV3.LicenseMaster, status enterpr
ssSpec := testutils.NewLicenseMaster(instance.Name, instance.Namespace, "image")
ssSpec.ResourceVersion = instance.ResourceVersion
Expect(k8sClient.Update(context.Background(), ssSpec)).Should(Succeed())
- time.Sleep(2 * time.Second)
- By("Expecting LicenseMaster custom resource to be created successfully")
+ By("Expecting LicenseMaster custom resource to be updated successfully")
ss := &enterpriseApiV3.LicenseMaster{}
Eventually(func() bool {
- _ = k8sClient.Get(context.Background(), key, ss)
- if status != "" {
- fmt.Printf("status is set to %v", status)
- ss.Status.Phase = status
- Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
- time.Sleep(2 * time.Second)
- }
- return true
+ return k8sClient.Get(context.Background(), key, ss) == nil
}, timeout, interval).Should(BeTrue())
+ if status != "" {
+ ss.Status.Phase = status
+ Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
+ }
return ss
}
diff --git a/internal/controller/monitoringconsole_controller_test.go b/internal/controller/monitoringconsole_controller_test.go
index 644f13da0..8159c15ae 100644
--- a/internal/controller/monitoringconsole_controller_test.go
+++ b/internal/controller/monitoringconsole_controller_test.go
@@ -2,14 +2,11 @@ package controller
import (
"context"
- "fmt"
"github.com/splunk/splunk-operator/internal/controller/testutils"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
- "time"
-
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -24,10 +21,6 @@ import (
var _ = Describe("MonitoringConsole Controller", func() {
- BeforeEach(func() {
- time.Sleep(2 * time.Second)
- })
-
AfterEach(func() {
})
@@ -148,20 +141,16 @@ func CreateMonitoringConsole(name string, namespace string, annotations map[stri
}
ssSpec := testutils.NewMonitoringConsole(name, namespace, "image")
Expect(k8sClient.Create(context.Background(), ssSpec)).Should(Succeed())
- time.Sleep(2 * time.Second)
By("Expecting MonitoringConsole custom resource to be created successfully")
ss := &enterpriseApi.MonitoringConsole{}
Eventually(func() bool {
- _ = k8sClient.Get(context.Background(), key, ss)
- if status != "" {
- fmt.Printf("status is set to %v", status)
- ss.Status.Phase = status
- Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
- time.Sleep(2 * time.Second)
- }
- return true
+ return k8sClient.Get(context.Background(), key, ss) == nil
}, timeout, interval).Should(BeTrue())
+ if status != "" {
+ ss.Status.Phase = status
+ Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
+ }
return ss
}
@@ -175,20 +164,16 @@ func UpdateMonitoringConsole(instance *enterpriseApi.MonitoringConsole, status e
ssSpec := testutils.NewMonitoringConsole(instance.Name, instance.Namespace, "image")
ssSpec.ResourceVersion = instance.ResourceVersion
Expect(k8sClient.Update(context.Background(), ssSpec)).Should(Succeed())
- time.Sleep(2 * time.Second)
- By("Expecting MonitoringConsole custom resource to be created successfully")
+ By("Expecting MonitoringConsole custom resource to be updated successfully")
ss := &enterpriseApi.MonitoringConsole{}
Eventually(func() bool {
- _ = k8sClient.Get(context.Background(), key, ss)
- if status != "" {
- fmt.Printf("status is set to %v", status)
- ss.Status.Phase = status
- Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
- time.Sleep(2 * time.Second)
- }
- return true
+ return k8sClient.Get(context.Background(), key, ss) == nil
}, timeout, interval).Should(BeTrue())
+ if status != "" {
+ ss.Status.Phase = status
+ Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
+ }
return ss
}
diff --git a/internal/controller/searchheadcluster_controller_test.go b/internal/controller/searchheadcluster_controller_test.go
index 983849237..fd15b22e1 100644
--- a/internal/controller/searchheadcluster_controller_test.go
+++ b/internal/controller/searchheadcluster_controller_test.go
@@ -2,14 +2,11 @@ package controller
import (
"context"
- "fmt"
"github.com/splunk/splunk-operator/internal/controller/testutils"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
- "time"
-
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -24,10 +21,6 @@ import (
var _ = Describe("SearchHeadCluster Controller", func() {
- BeforeEach(func() {
- time.Sleep(2 * time.Second)
- })
-
AfterEach(func() {
})
@@ -149,21 +142,17 @@ func CreateSearchHeadCluster(name string, namespace string, annotations map[stri
}
ssSpec := testutils.NewSearchHeadCluster(name, namespace, "image")
Expect(k8sClient.Create(context.Background(), ssSpec)).Should(Succeed())
- time.Sleep(2 * time.Second)
By("Expecting SearchHeadCluster custom resource to be created successfully")
ss := &enterpriseApi.SearchHeadCluster{}
Eventually(func() bool {
- _ = k8sClient.Get(context.Background(), key, ss)
- if status != "" {
- fmt.Printf("status is set to %v", status)
- ss.Status.Phase = status
- ss.Status.DeployerPhase = status
- Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
- time.Sleep(2 * time.Second)
- }
- return true
+ return k8sClient.Get(context.Background(), key, ss) == nil
}, timeout, interval).Should(BeTrue())
+ if status != "" {
+ ss.Status.Phase = status
+ ss.Status.DeployerPhase = status
+ Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
+ }
return ss
}
@@ -177,21 +166,17 @@ func UpdateSearchHeadCluster(instance *enterpriseApi.SearchHeadCluster, status e
ssSpec := testutils.NewSearchHeadCluster(instance.Name, instance.Namespace, "image")
ssSpec.ResourceVersion = instance.ResourceVersion
Expect(k8sClient.Update(context.Background(), ssSpec)).Should(Succeed())
- time.Sleep(2 * time.Second)
- By("Expecting SearchHeadCluster custom resource to be created successfully")
+ By("Expecting SearchHeadCluster custom resource to be updated successfully")
ss := &enterpriseApi.SearchHeadCluster{}
Eventually(func() bool {
- _ = k8sClient.Get(context.Background(), key, ss)
- if status != "" {
- fmt.Printf("status is set to %v", status)
- ss.Status.Phase = status
- ss.Status.DeployerPhase = status
- Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
- time.Sleep(2 * time.Second)
- }
- return true
+ return k8sClient.Get(context.Background(), key, ss) == nil
}, timeout, interval).Should(BeTrue())
+ if status != "" {
+ ss.Status.Phase = status
+ ss.Status.DeployerPhase = status
+ Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
+ }
return ss
}
diff --git a/internal/controller/standalone_controller_test.go b/internal/controller/standalone_controller_test.go
index d7c4ca842..e4cf6923b 100644
--- a/internal/controller/standalone_controller_test.go
+++ b/internal/controller/standalone_controller_test.go
@@ -2,14 +2,12 @@ package controller
import (
"context"
- "fmt"
+ "time"
"github.com/splunk/splunk-operator/internal/controller/testutils"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
- "time"
-
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -22,15 +20,11 @@ import (
"k8s.io/client-go/kubernetes/scheme"
)
-const timeout = time.Second * 120
-const interval = time.Second * 2
+const timeout = time.Second * 10
+const interval = time.Millisecond * 250
var _ = Describe("Standalone Controller", func() {
- BeforeEach(func() {
- time.Sleep(2 * time.Second)
- })
-
AfterEach(func() {
})
@@ -159,20 +153,16 @@ func CreateStandalone(name string, namespace string, annotations map[string]stri
}
ssSpec = testutils.NewStandalone(name, namespace, "image")
Expect(k8sClient.Create(context.Background(), ssSpec)).Should(Succeed())
- time.Sleep(2 * time.Second)
By("Expecting Standalone custom resource to be created successfully")
ss := &enterpriseApi.Standalone{}
Eventually(func() bool {
- _ = k8sClient.Get(context.Background(), key, ss)
- if status != "" {
- fmt.Printf("status is set to %v", status)
- ss.Status.Phase = status
- Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
- time.Sleep(2 * time.Second)
- }
- return true
+ return k8sClient.Get(context.Background(), key, ss) == nil
}, timeout, interval).Should(BeTrue())
+ if status != "" {
+ ss.Status.Phase = status
+ Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
+ }
return ss
}
@@ -186,20 +176,16 @@ func UpdateStandalone(instance *enterpriseApi.Standalone, status enterpriseApi.P
ssSpec := testutils.NewStandalone(instance.Name, instance.Namespace, "image")
ssSpec.ResourceVersion = instance.ResourceVersion
Expect(k8sClient.Update(context.Background(), ssSpec)).Should(Succeed())
- time.Sleep(2 * time.Second)
- By("Expecting Standalone custom resource to be created successfully")
+ By("Expecting Standalone custom resource to be updated successfully")
ss := &enterpriseApi.Standalone{}
Eventually(func() bool {
- _ = k8sClient.Get(context.Background(), key, ss)
- if status != "" {
- fmt.Printf("status is set to %v", status)
- ss.Status.Phase = status
- Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
- time.Sleep(2 * time.Second)
- }
- return true
+ return k8sClient.Get(context.Background(), key, ss) == nil
}, timeout, interval).Should(BeTrue())
+ if status != "" {
+ ss.Status.Phase = status
+ Expect(k8sClient.Status().Update(context.Background(), ss)).Should(Succeed())
+ }
return ss
}
diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go
index c9e65e9f3..8eacf16a2 100644
--- a/pkg/splunk/enterprise/indexercluster.go
+++ b/pkg/splunk/enterprise/indexercluster.go
@@ -888,7 +888,7 @@ func (mgr *indexerClusterPodManager) Update(ctx context.Context, c splcommon.Con
eventPublisher := GetEventPublisher(ctx, mgr.cr)
-	// Track last successful replica count to emit scale events after completion
-	previousReplicas := mgr.cr.Status.Replicas
+	// Track previously observed ready replica count to emit scale events after completion
+	previousReadyReplicas := mgr.cr.Status.ReadyReplicas
// Assign client
if mgr.c == nil {
@@ -927,17 +927,19 @@ func (mgr *indexerClusterPodManager) Update(ctx context.Context, c splcommon.Con
return phase, err
}
- // Emit ScaledUp event only after a successful scale-up has completed
+ // Emit scale events when phase is ready and ready replicas changed to match desired
if phase == enterpriseApi.PhaseReady {
- if desiredReplicas > previousReplicas && mgr.cr.Status.Replicas == desiredReplicas {
- if eventPublisher != nil {
- eventPublisher.Normal(ctx, "ScaledUp",
- fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", mgr.cr.GetName(), previousReplicas, desiredReplicas))
- }
- } else if desiredReplicas < previousReplicas && mgr.cr.Status.Replicas == desiredReplicas {
- if eventPublisher != nil {
- eventPublisher.Normal(ctx, "ScaledDown",
- fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", mgr.cr.GetName(), previousReplicas, desiredReplicas))
+ if mgr.cr.Status.ReadyReplicas == desiredReplicas && previousReadyReplicas != desiredReplicas {
+ if desiredReplicas > previousReadyReplicas {
+ if eventPublisher != nil {
+ eventPublisher.Normal(ctx, "ScaledUp",
+ fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", mgr.cr.GetName(), previousReadyReplicas, desiredReplicas))
+ }
+ } else if desiredReplicas < previousReadyReplicas {
+ if eventPublisher != nil {
+ eventPublisher.Normal(ctx, "ScaledDown",
+ fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", mgr.cr.GetName(), previousReadyReplicas, desiredReplicas))
+ }
}
}
}
diff --git a/pkg/splunk/enterprise/searchheadclusterpodmanager.go b/pkg/splunk/enterprise/searchheadclusterpodmanager.go
index 7b3a19d30..b12656efe 100644
--- a/pkg/splunk/enterprise/searchheadclusterpodmanager.go
+++ b/pkg/splunk/enterprise/searchheadclusterpodmanager.go
@@ -49,7 +49,7 @@ func (mgr *searchHeadClusterPodManager) Update(ctx context.Context, c splcommon.
eventPublisher := GetEventPublisher(ctx, mgr.cr)
-	// Track last successful replica count to emit scale events after completion
-	previousReplicas := mgr.cr.Status.Replicas
+	// Track previously observed ready replica count to emit scale events after completion
+	previousReadyReplicas := mgr.cr.Status.ReadyReplicas
// update statefulset, if necessary
_, err := splctrl.ApplyStatefulSet(ctx, mgr.c, statefulSet)
@@ -79,17 +79,19 @@ func (mgr *searchHeadClusterPodManager) Update(ctx context.Context, c splcommon.
return phase, err
}
- // Emit ScaledUp event only after a successful scale-up has completed
+ // Emit scale events when phase is ready and ready replicas changed to match desired
if phase == enterpriseApi.PhaseReady {
- if desiredReplicas > previousReplicas && mgr.cr.Status.Replicas == desiredReplicas {
- if eventPublisher != nil {
- eventPublisher.Normal(ctx, "ScaledUp",
- fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", mgr.cr.GetName(), previousReplicas, desiredReplicas))
- }
- } else if desiredReplicas < previousReplicas && mgr.cr.Status.Replicas == desiredReplicas {
- if eventPublisher != nil {
- eventPublisher.Normal(ctx, "ScaledDown",
- fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", mgr.cr.GetName(), previousReplicas, desiredReplicas))
+ if mgr.cr.Status.ReadyReplicas == desiredReplicas && previousReadyReplicas != desiredReplicas {
+ if desiredReplicas > previousReadyReplicas {
+ if eventPublisher != nil {
+ eventPublisher.Normal(ctx, "ScaledUp",
+ fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", mgr.cr.GetName(), previousReadyReplicas, desiredReplicas))
+ }
+ } else if desiredReplicas < previousReadyReplicas {
+ if eventPublisher != nil {
+ eventPublisher.Normal(ctx, "ScaledDown",
+ fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", mgr.cr.GetName(), previousReadyReplicas, desiredReplicas))
+ }
}
}
}
diff --git a/pkg/splunk/enterprise/standalone.go b/pkg/splunk/enterprise/standalone.go
index dd498ce33..0d9718132 100644
--- a/pkg/splunk/enterprise/standalone.go
+++ b/pkg/splunk/enterprise/standalone.go
@@ -216,7 +216,7 @@ func ApplyStandalone(ctx context.Context, client splcommon.ControllerClient, cr
}
-	// Track last successful replica count to emit scale events after completion
-	previousReplicas := cr.Status.Replicas
+	// Track previously observed ready replica count to emit scale events after completion
+	previousReadyReplicas := cr.Status.ReadyReplicas
mgr := splctrl.DefaultStatefulSetPodManager{}
phase, err := mgr.Update(ctx, client, statefulSet, cr.Spec.Replicas)
@@ -228,18 +228,20 @@ func ApplyStandalone(ctx context.Context, client splcommon.ControllerClient, cr
}
cr.Status.Phase = phase
- // Emit scale events only after a successful scale operation has completed
+ // Emit scale events when phase is ready and ready replicas changed to match desired
if phase == enterpriseApi.PhaseReady {
desiredReplicas := cr.Spec.Replicas
- if desiredReplicas > previousReplicas && cr.Status.Replicas == desiredReplicas {
- if eventPublisher != nil {
- eventPublisher.Normal(ctx, "ScaledUp",
- fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
- }
- } else if desiredReplicas < previousReplicas && cr.Status.Replicas == desiredReplicas {
- if eventPublisher != nil {
- eventPublisher.Normal(ctx, "ScaledDown",
- fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+ if cr.Status.ReadyReplicas == desiredReplicas && previousReadyReplicas != desiredReplicas {
+ if desiredReplicas > previousReadyReplicas {
+ if eventPublisher != nil {
+ eventPublisher.Normal(ctx, "ScaledUp",
+ fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", cr.GetName(), previousReadyReplicas, desiredReplicas))
+ }
+ } else if desiredReplicas < previousReadyReplicas {
+ if eventPublisher != nil {
+ eventPublisher.Normal(ctx, "ScaledDown",
+ fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReadyReplicas, desiredReplicas))
+ }
}
}
}
diff --git a/pkg/splunk/enterprise/telemetry_test.go b/pkg/splunk/enterprise/telemetry_test.go
index 8a7a55073..f6c623017 100644
--- a/pkg/splunk/enterprise/telemetry_test.go
+++ b/pkg/splunk/enterprise/telemetry_test.go
@@ -6,11 +6,12 @@ import (
"context"
"encoding/json"
"errors"
+ "testing"
+ "time"
+
enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
splclient "github.com/splunk/splunk-operator/pkg/splunk/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
- "testing"
- "time"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
"github.com/splunk/splunk-operator/pkg/splunk/test"
@@ -234,10 +235,8 @@ func TestTelemetryUpdateLastTransmissionTime_RepeatedCalls(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "default"},
Data: map[string]string{},
}
- status := &TelemetryStatus{Test: "false"}
- updateLastTransmissionTime(ctx, mockClient, cm, status)
+ status := &TelemetryStatus{Test: "false", LastTransmission: "1970-01-01T00:00:00Z"}
firstStatus := cm.Data[telStatusKey]
- time.Sleep(1 * time.Second)
updateLastTransmissionTime(ctx, mockClient, cm, status)
secondStatus := cm.Data[telStatusKey]
if firstStatus == secondStatus {
diff --git a/test/appframework_aws/c3/appframework_aws_test.go b/test/appframework_aws/c3/appframework_aws_test.go
index 2d150f5ac..0846a4768 100644
--- a/test/appframework_aws/c3/appframework_aws_test.go
+++ b/test/appframework_aws/c3/appframework_aws_test.go
@@ -19,7 +19,6 @@ import (
"fmt"
"path/filepath"
"strings"
- "time"
enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
@@ -619,7 +618,11 @@ var _ = Describe("c3appfw test", func() {
cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames}
shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames}
allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo}
- time.Sleep(60 * time.Second)
+ // Wait for apps to reach Install phase before verification
+ for _, appSource := range allAppSourceInfo {
+ err = testenv.WaitForAllAppsPhase(ctx, deployment, testcaseEnvInst, appSource.CrName, appSource.CrKind, appSource.CrAppSourceName, appSource.CrAppList, enterpriseApi.PhaseInstall, testenv.AppInstallTimeout)
+ Expect(err).To(Succeed(), "Timed out waiting for apps to reach Install phase on %s", appSource.CrName)
+ }
testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "")
// Verify no pods reset by checking the pod age
@@ -1277,7 +1280,11 @@ var _ = Describe("c3appfw test", func() {
shcAppSourceInfoLocal := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameLocalShc, CrAppSourceVolumeName: appSourceVolumeNameShcLocal, CrPod: deployerPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListLocal, CrAppFileList: localappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames}
shcAppSourceInfoCluster := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameClusterShc, CrAppSourceVolumeName: appSourceVolumeNameShcCluster, CrPod: deployerPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListCluster, CrAppFileList: clusterappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames}
allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfoLocal, cmAppSourceInfoCluster, shcAppSourceInfoLocal, shcAppSourceInfoCluster}
- time.Sleep(2 * time.Minute) // FIXME adding sleep to see if verification succeedes
+ // Wait for apps to reach Install phase before verification
+ for _, appSource := range allAppSourceInfo {
+ err = testenv.WaitForAllAppsPhase(ctx, deployment, testcaseEnvInst, appSource.CrName, appSource.CrKind, appSource.CrAppSourceName, appSource.CrAppList, enterpriseApi.PhaseInstall, testenv.AppInstallTimeout)
+ Expect(err).To(Succeed(), "Timed out waiting for apps to reach Install phase on %s", appSource.CrName)
+ }
ClusterMasterBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "")
// Verify no pods reset by checking the pod age
diff --git a/test/appframework_aws/c3/manager_appframework_test.go b/test/appframework_aws/c3/manager_appframework_test.go
index ba0b3e8ea..f4112992f 100644
--- a/test/appframework_aws/c3/manager_appframework_test.go
+++ b/test/appframework_aws/c3/manager_appframework_test.go
@@ -427,10 +427,12 @@ var _ = Describe("c3appfw test", func() {
err = deployment.UpdateCR(ctx, idxc)
Expect(err).To(Succeed(), "Failed upgrade Indexer Cluster image")
- // Allow time for update to take effect
- time.Sleep(1 * time.Second)
+ // Wait for Cluster Manager to reach Ready phase after image upgrade
+ // Image upgrades require longer timeout due to pod restarts
+ err = testenv.WaitForClusterManagerPhase(ctx, deployment, testcaseEnvInst.GetName(), cm.Name, enterpriseApi.PhaseReady, 10*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for ClusterManager to reach Ready phase")
- // Ensure Cluster Manager goes to Ready phase
+ // Ensure Cluster Manager stays in Ready phase
testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst)
// Wait for License Manager to be in READY phase
@@ -757,7 +759,11 @@ var _ = Describe("c3appfw test", func() {
cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames}
shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames}
allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo}
- time.Sleep(60 * time.Second)
+ // Wait for apps to reach Install phase before verification
+ for _, appSource := range allAppSourceInfo {
+ err = testenv.WaitForAllAppsPhase(ctx, deployment, testcaseEnvInst, appSource.CrName, appSource.CrKind, appSource.CrAppSourceName, appSource.CrAppList, enterpriseApi.PhaseInstall, testenv.AppInstallTimeout)
+ Expect(err).To(Succeed(), "Timed out waiting for apps to reach Install phase on %s", appSource.CrName)
+ }
testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "")
// Verify no pods reset by checking the pod age
@@ -806,6 +812,10 @@ var _ = Describe("c3appfw test", func() {
// Ensure Indexer Cluster go to Ready phase
testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst)
+ // Wait for ScaledUp event to confirm scaling completed successfully
+ err = testenv.WaitForScaledUp(ctx, deployment, testcaseEnvInst.GetName(), idxcName, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for ScaledUp event on IndexerCluster")
+
// Verify New Indexer On Cluster Manager
indexerName := fmt.Sprintf(testenv.IndexerPod, deployment.GetName(), scaledIndexerReplicas-1)
testcaseEnvInst.Log.Info(fmt.Sprintf("Checking for New Indexer %s On Cluster Manager", indexerName))
@@ -899,6 +909,10 @@ var _ = Describe("c3appfw test", func() {
// Ensure Indexer Cluster go to Ready phase
testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst)
+ // Wait for ScaledDown event to confirm scaling completed successfully
+ err = testenv.WaitForScaledDown(ctx, deployment, testcaseEnvInst.GetName(), idxcName, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for ScaledDown event on IndexerCluster")
+
// Ensure Search Head Cluster go to Ready phase
testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst)
@@ -1415,7 +1429,11 @@ var _ = Describe("c3appfw test", func() {
shcAppSourceInfoLocal := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameLocalShc, CrAppSourceVolumeName: appSourceVolumeNameShcLocal, CrPod: deployerPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListLocal, CrAppFileList: localappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames}
shcAppSourceInfoCluster := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameClusterShc, CrAppSourceVolumeName: appSourceVolumeNameShcCluster, CrPod: deployerPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListCluster, CrAppFileList: clusterappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames}
allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfoLocal, cmAppSourceInfoCluster, shcAppSourceInfoLocal, shcAppSourceInfoCluster}
- time.Sleep(2 * time.Minute) // FIXME adding sleep to see if verification succeedes
+ // Wait for apps to reach Install phase before verification
+ for _, appSource := range allAppSourceInfo {
+ err = testenv.WaitForAllAppsPhase(ctx, deployment, testcaseEnvInst, appSource.CrName, appSource.CrKind, appSource.CrAppSourceName, appSource.CrAppList, enterpriseApi.PhaseInstall, testenv.AppInstallTimeout)
+ Expect(err).To(Succeed(), "Timed out waiting for apps to reach Install phase on %s", appSource.CrName)
+ }
clusterManagerBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "")
// Verify no pods reset by checking the pod age
diff --git a/test/appframework_aws/m4/appframework_aws_test.go b/test/appframework_aws/m4/appframework_aws_test.go
index eab0b7023..e4aefffdf 100644
--- a/test/appframework_aws/m4/appframework_aws_test.go
+++ b/test/appframework_aws/m4/appframework_aws_test.go
@@ -1140,7 +1140,10 @@ var _ = Describe("m4appfw test", func() {
err = deployment.UpdateCR(ctx, config)
Expect(err).To(Succeed(), "Unable to update config map")
- time.Sleep(2 * time.Minute)
+ // Wait for Monitoring Console to reach Ready phase instead of using time.Sleep
+ err = testenv.WaitForMonitoringConsolePhase(ctx, deployment, testcaseEnvInst.GetName(), mc.Name, enterpriseApi.PhaseReady, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for MonitoringConsole to reach Ready phase")
+
// Verify Monitoring Console is ready and stays in ready state
testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst)
@@ -1710,7 +1713,10 @@ var _ = Describe("m4appfw test", func() {
// Ensure Search Head Cluster go to Ready phase
testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst)
- time.Sleep(60 * time.Second)
+ // Wait for SearchHeadCluster to reach Ready phase instead of using time.Sleep
+ err = testenv.WaitForSearchHeadClusterPhase(ctx, deployment, testcaseEnvInst.GetName(), shc.Name, enterpriseApi.PhaseReady, 60*time.Second)
+ Expect(err).To(Succeed(), "Timed out waiting for SearchHeadCluster to reach Ready phase")
+
// Wait for polling interval to pass
testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName()+"-shc", shc.Kind, appSourceNameShc, appFileList)
@@ -2278,9 +2284,10 @@ var _ = Describe("m4appfw test", func() {
err = deployment.UpdateCR(ctx, config)
Expect(err).To(Succeed(), "Unable to update config map for Search Head Cluster")
- // Wait 5 seconds to be sure reconcile caused by CR update and config map update are done
- testcaseEnvInst.Log.Info("Wait 5 seconds to be sure reconcile caused by CR update and config map update are done")
- time.Sleep(5 * time.Second)
+ // Wait for ClusterManager to reach Ready phase instead of using time.Sleep
+ testcaseEnvInst.Log.Info("Wait for ClusterManager to reach Ready phase")
+ err = testenv.WaitForClusterManagerPhase(ctx, deployment, testcaseEnvInst.GetName(), cm.Name, enterpriseApi.PhaseReady, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for ClusterManager to reach Ready phase")
// Verify status is 'ON' in config map for Cluster Master and Search Head Cluster
testcaseEnvInst.Log.Info("Verify status is 'ON' in config map for Cluster Master and Search Head Cluster")
diff --git a/test/appframework_aws/m4/manager_appframework_test.go b/test/appframework_aws/m4/manager_appframework_test.go
index d99712693..459dbd1c5 100644
--- a/test/appframework_aws/m4/manager_appframework_test.go
+++ b/test/appframework_aws/m4/manager_appframework_test.go
@@ -1139,7 +1139,9 @@ var _ = Describe("m4appfw test", func() {
err = deployment.UpdateCR(ctx, config)
Expect(err).To(Succeed(), "Unable to update config map")
- time.Sleep(2 * time.Minute)
+ // Wait for Monitoring Console to reach Ready phase instead of using time.Sleep
+ err = testenv.WaitForMonitoringConsolePhase(ctx, deployment, testcaseEnvInst.GetName(), mc.Name, enterpriseApi.PhaseReady, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for MonitoringConsole to reach Ready phase")
// Verify Monitoring Console is ready and stays in ready state
testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst)
@@ -1709,7 +1711,10 @@ var _ = Describe("m4appfw test", func() {
// Ensure Search Head Cluster go to Ready phase
testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst)
- time.Sleep(60 * time.Second)
+ // Wait for SearchHeadCluster to reach Ready phase instead of using time.Sleep
+ err = testenv.WaitForSearchHeadClusterPhase(ctx, deployment, testcaseEnvInst.GetName(), shc.Name, enterpriseApi.PhaseReady, 60*time.Second)
+ Expect(err).To(Succeed(), "Timed out waiting for SearchHeadCluster to reach Ready phase")
+
// Wait for polling interval to pass
testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName()+"-shc", shc.Kind, appSourceNameShc, appFileList)
@@ -2277,9 +2282,10 @@ var _ = Describe("m4appfw test", func() {
err = deployment.UpdateCR(ctx, config)
Expect(err).To(Succeed(), "Unable to update config map for Search Head Cluster")
- // Wait 5 seconds to be sure reconcile caused by CR update and config map update are done
- testcaseEnvInst.Log.Info("Wait 5 seconds to be sure reconcile caused by CR update and config map update are done")
- time.Sleep(5 * time.Second)
+ // Wait for ClusterManager to reach Ready phase instead of using time.Sleep
+ testcaseEnvInst.Log.Info("Wait for ClusterManager to reach Ready phase")
+ err = testenv.WaitForClusterManagerPhase(ctx, deployment, testcaseEnvInst.GetName(), cm.Name, enterpriseApi.PhaseReady, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for ClusterManager to reach Ready phase")
// Verify status is 'ON' in config map for Cluster Manager and Search Head Cluster
testcaseEnvInst.Log.Info("Verify status is 'ON' in config map for Cluster Manager and Search Head Cluster")
diff --git a/test/appframework_aws/s1/appframework_aws_test.go b/test/appframework_aws/s1/appframework_aws_test.go
index c42746b91..ebba66060 100644
--- a/test/appframework_aws/s1/appframework_aws_test.go
+++ b/test/appframework_aws/s1/appframework_aws_test.go
@@ -30,6 +30,7 @@ import (
"github.com/splunk/splunk-operator/pkg/splunk/enterprise"
testenv "github.com/splunk/splunk-operator/test/testenv"
corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/wait"
)
var _ = Describe("s1appfw test", func() {
@@ -1086,10 +1087,11 @@ var _ = Describe("s1appfw test", func() {
err = deployment.UpdateCR(ctx, config)
Expect(err).To(Succeed(), "Unable to update config map")
- // Allow time for update to take effect
- time.Sleep(1 * time.Second)
+ // Wait for Standalone to reach Ready phase after config map update
+ err = testenv.WaitForStandalonePhase(ctx, deployment, testcaseEnvInst.GetName(), standalone.Name, enterpriseApi.PhaseReady, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for Standalone to reach Ready phase")
- // Wait for Standalone to be in READY status
+ // Verify Standalone stays in ready state
testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst)
// Verify Monitoring Console is Ready and stays in ready state
@@ -1381,17 +1383,10 @@ var _ = Describe("s1appfw test", func() {
// ############ Verify livenessProbe and readinessProbe config object and scripts############
testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe")
ConfigMapName := enterprise.GetProbeConfigMapName(testcaseEnvInst.GetName())
- _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName)
- if err != nil {
- for i := 1; i < 10; i++ {
- _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName)
- if err == nil {
- continue
- } else {
- time.Sleep(1 * time.Second)
- }
- }
- }
+ err = wait.PollUntilContextTimeout(ctx, testenv.PollInterval, 10*time.Second, true, func(ctx context.Context) (bool, error) {
+ _, getErr := testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName)
+ return getErr == nil, nil
+ })
Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName)
// Verify App installation is in progress on Standalone
diff --git a/test/appframework_az/c3/appframework_azure_test.go b/test/appframework_az/c3/appframework_azure_test.go
index c7fea6ff3..edbeb2059 100644
--- a/test/appframework_az/c3/appframework_azure_test.go
+++ b/test/appframework_az/c3/appframework_azure_test.go
@@ -19,7 +19,6 @@ import (
"fmt"
"path/filepath"
"strings"
- "time"
. "github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/types"
@@ -604,7 +603,11 @@ var _ = Describe("c3appfw test", func() {
cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames}
shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames}
allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo}
- time.Sleep(60 * time.Second)
+ // Wait for apps to reach Install phase before verification
+ for _, appSource := range allAppSourceInfo {
+ err = testenv.WaitForAllAppsPhase(ctx, deployment, testcaseEnvInst, appSource.CrName, appSource.CrKind, appSource.CrAppSourceName, appSource.CrAppList, enterpriseApi.PhaseInstall, testenv.AppInstallTimeout)
+ Expect(err).To(Succeed(), "Timed out waiting for apps to reach Install phase on %s", appSource.CrName)
+ }
testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "")
// Verify no pods reset by checking the pod age
@@ -1435,7 +1438,11 @@ var _ = Describe("c3appfw test", func() {
shcAppSourceInfoLocal := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameLocalShc, CrAppSourceVolumeName: appSourceVolumeNameShcLocal, CrPod: deployerPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListLocal, CrAppFileList: localappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames}
shcAppSourceInfoCluster := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameClusterShc, CrAppSourceVolumeName: appSourceVolumeNameShcCluster, CrPod: deployerPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListCluster, CrAppFileList: clusterappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames}
allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfoLocal, cmAppSourceInfoCluster, shcAppSourceInfoLocal, shcAppSourceInfoCluster}
- time.Sleep(2 * time.Minute) // FIXME adding sleep to see if verification succeedes
+ // Wait for apps to reach Install phase before verification
+ for _, appSource := range allAppSourceInfo {
+ err = testenv.WaitForAllAppsPhase(ctx, deployment, testcaseEnvInst, appSource.CrName, appSource.CrKind, appSource.CrAppSourceName, appSource.CrAppList, enterpriseApi.PhaseInstall, testenv.AppInstallTimeout)
+ Expect(err).To(Succeed(), "Timed out waiting for apps to reach Install phase on %s", appSource.CrName)
+ }
clusterManagerBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "")
// Verify no pods reset by checking the pod age
diff --git a/test/appframework_az/c3/manager_appframework_azure_test.go b/test/appframework_az/c3/manager_appframework_azure_test.go
index 4412efe43..752911b89 100644
--- a/test/appframework_az/c3/manager_appframework_azure_test.go
+++ b/test/appframework_az/c3/manager_appframework_azure_test.go
@@ -19,7 +19,6 @@ import (
"fmt"
"path/filepath"
"strings"
- "time"
. "github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/types"
@@ -602,7 +601,11 @@ var _ = Describe("c3appfw test", func() {
cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames}
shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames}
allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo}
- time.Sleep(60 * time.Second)
+ // Wait for apps to reach Install phase before verification
+ for _, appSource := range allAppSourceInfo {
+ err = testenv.WaitForAllAppsPhase(ctx, deployment, testcaseEnvInst, appSource.CrName, appSource.CrKind, appSource.CrAppSourceName, appSource.CrAppList, enterpriseApi.PhaseInstall, testenv.AppInstallTimeout)
+ Expect(err).To(Succeed(), "Timed out waiting for apps to reach Install phase on %s", appSource.CrName)
+ }
testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "")
// Verify no pods reset by checking the pod age
@@ -1435,7 +1438,11 @@ var _ = Describe("c3appfw test", func() {
shcAppSourceInfoLocal := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameLocalShc, CrAppSourceVolumeName: appSourceVolumeNameShcLocal, CrPod: deployerPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListLocal, CrAppFileList: localappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames}
shcAppSourceInfoCluster := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameClusterShc, CrAppSourceVolumeName: appSourceVolumeNameShcCluster, CrPod: deployerPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListCluster, CrAppFileList: clusterappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames}
allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfoLocal, cmAppSourceInfoCluster, shcAppSourceInfoLocal, shcAppSourceInfoCluster}
- time.Sleep(2 * time.Minute) // FIXME adding sleep to see if verification succeedes
+ // Wait for apps to reach Install phase before verification
+ for _, appSource := range allAppSourceInfo {
+ err = testenv.WaitForAllAppsPhase(ctx, deployment, testcaseEnvInst, appSource.CrName, appSource.CrKind, appSource.CrAppSourceName, appSource.CrAppList, enterpriseApi.PhaseInstall, testenv.AppInstallTimeout)
+ Expect(err).To(Succeed(), "Timed out waiting for apps to reach Install phase on %s", appSource.CrName)
+ }
clusterManagerBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "")
// Verify no pods reset by checking the pod age
diff --git a/test/appframework_az/m4/appframework_azure_test.go b/test/appframework_az/m4/appframework_azure_test.go
index 1a9af4953..69313190e 100644
--- a/test/appframework_az/m4/appframework_azure_test.go
+++ b/test/appframework_az/m4/appframework_azure_test.go
@@ -1113,7 +1113,9 @@ var _ = Describe("m4appfw test", func() {
err = deployment.UpdateCR(ctx, config)
Expect(err).To(Succeed(), "Unable to update config map")
- time.Sleep(2 * time.Minute)
+ // Wait for Monitoring Console to reach Ready phase instead of using time.Sleep
+ err = testenv.WaitForMonitoringConsolePhase(ctx, deployment, testcaseEnvInst.GetName(), mc.Name, enterpriseApi.PhaseReady, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for MonitoringConsole to reach Ready phase")
// Verify Monitoring Console is ready and stays in ready state
testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst)
@@ -1686,7 +1688,9 @@ var _ = Describe("m4appfw test", func() {
// Ensure Search Head Cluster go to Ready phase
testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst)
- time.Sleep(60 * time.Second)
+ // Wait for SearchHeadCluster to reach Ready phase instead of using time.Sleep
+ err = testenv.WaitForSearchHeadClusterPhase(ctx, deployment, testcaseEnvInst.GetName(), shc.Name, enterpriseApi.PhaseReady, 60*time.Second)
+ Expect(err).To(Succeed(), "Timed out waiting for SearchHeadCluster to reach Ready phase")
// Wait for polling interval to pass
testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName()+"-shc", shc.Kind, appSourceNameShc, appFileList)
@@ -2260,9 +2264,10 @@ var _ = Describe("m4appfw test", func() {
err = deployment.UpdateCR(ctx, config)
Expect(err).To(Succeed(), "Unable to update config map for Search Head Cluster")
- // Wait 5 seconds to be sure reconcile caused by CR update and config map update are done
- testcaseEnvInst.Log.Info("Wait 5 seconds to be sure reconcile caused by CR update and config map update are done")
- time.Sleep(5 * time.Second)
+ // Wait for ClusterManager to reach Ready phase instead of using time.Sleep
+ testcaseEnvInst.Log.Info("Wait for ClusterManager to reach Ready phase")
+ err = testenv.WaitForClusterManagerPhase(ctx, deployment, testcaseEnvInst.GetName(), cm.Name, enterpriseApi.PhaseReady, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for ClusterManager to reach Ready phase")
// Verify status is 'ON' in config map for Cluster Master and Search Head Cluster
testcaseEnvInst.Log.Info("Verify status is 'ON' in config map for Cluster Master and Search Head Cluster")
diff --git a/test/appframework_az/m4/manager_appframework_azure_test.go b/test/appframework_az/m4/manager_appframework_azure_test.go
index 9cd5d9507..7d1f0e4b3 100644
--- a/test/appframework_az/m4/manager_appframework_azure_test.go
+++ b/test/appframework_az/m4/manager_appframework_azure_test.go
@@ -1112,7 +1112,9 @@ var _ = Describe("m4appfw test", func() {
err = deployment.UpdateCR(ctx, config)
Expect(err).To(Succeed(), "Unable to update config map")
- time.Sleep(2 * time.Minute)
+ // Wait for Monitoring Console to reach Ready phase instead of using time.Sleep
+ err = testenv.WaitForMonitoringConsolePhase(ctx, deployment, testcaseEnvInst.GetName(), mc.Name, enterpriseApi.PhaseReady, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for MonitoringConsole to reach Ready phase")
// Verify Monitoring Console is ready and stays in ready state
testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst)
@@ -1685,7 +1687,10 @@ var _ = Describe("m4appfw test", func() {
// Ensure Search Head Cluster go to Ready phase
testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst)
- time.Sleep(60 * time.Second)
+ // Wait for SearchHeadCluster to reach Ready phase instead of using time.Sleep
+ err = testenv.WaitForSearchHeadClusterPhase(ctx, deployment, testcaseEnvInst.GetName(), shc.Name, enterpriseApi.PhaseReady, 60*time.Second)
+ Expect(err).To(Succeed(), "Timed out waiting for SearchHeadCluster to reach Ready phase")
+
// Wait for polling interval to pass
testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName()+"-shc", shc.Kind, appSourceNameShc, appFileList)
@@ -2259,9 +2264,10 @@ var _ = Describe("m4appfw test", func() {
err = deployment.UpdateCR(ctx, config)
Expect(err).To(Succeed(), "Unable to update config map for Search Head Cluster")
- // Wait 5 seconds to be sure reconcile caused by CR update and config map update are done
- testcaseEnvInst.Log.Info("Wait 5 seconds to be sure reconcile caused by CR update and config map update are done")
- time.Sleep(5 * time.Second)
+ // Wait for ClusterManager to reach Ready phase instead of using time.Sleep
+ testcaseEnvInst.Log.Info("Wait for ClusterManager to reach Ready phase")
+ err = testenv.WaitForClusterManagerPhase(ctx, deployment, testcaseEnvInst.GetName(), cm.Name, enterpriseApi.PhaseReady, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for ClusterManager to reach Ready phase")
// Verify status is 'ON' in config map for Cluster Master and Search Head Cluster
testcaseEnvInst.Log.Info("Verify status is 'ON' in config map for Cluster Master and Search Head Cluster")
diff --git a/test/appframework_gcp/c3/manager_appframework_test.go b/test/appframework_gcp/c3/manager_appframework_test.go
index 66c553e47..87b599b87 100644
--- a/test/appframework_gcp/c3/manager_appframework_test.go
+++ b/test/appframework_gcp/c3/manager_appframework_test.go
@@ -19,7 +19,6 @@ import (
"fmt"
"path/filepath"
"strings"
- "time"
. "github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/types"
@@ -764,7 +763,11 @@ var _ = Describe("c3appfw test", func() {
cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames}
shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames}
allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo}
- time.Sleep(60 * time.Second)
+ // Wait for apps to reach Install phase before verification
+ for _, appSource := range allAppSourceInfo {
+ err = testenv.WaitForAllAppsPhase(ctx, deployment, testcaseEnvInst, appSource.CrName, appSource.CrKind, appSource.CrAppSourceName, appSource.CrAppList, enterpriseApi.PhaseInstall, testenv.AppInstallTimeout)
+ Expect(err).To(Succeed(), "Timed out waiting for apps to reach Install phase on %s", appSource.CrName)
+ }
testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "")
// Verify no pods reset by checking the pod age
@@ -1422,7 +1425,11 @@ var _ = Describe("c3appfw test", func() {
shcAppSourceInfoLocal := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameLocalShc, CrAppSourceVolumeName: appSourceVolumeNameShcLocal, CrPod: deployerPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListLocal, CrAppFileList: localappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames}
shcAppSourceInfoCluster := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameClusterShc, CrAppSourceVolumeName: appSourceVolumeNameShcCluster, CrPod: deployerPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListCluster, CrAppFileList: clusterappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames}
allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfoLocal, cmAppSourceInfoCluster, shcAppSourceInfoLocal, shcAppSourceInfoCluster}
- time.Sleep(2 * time.Minute) // FIXME adding sleep to see if verification succeedes
+ // Wait for apps to reach Install phase before verification
+ for _, appSource := range allAppSourceInfo {
+ err = testenv.WaitForAllAppsPhase(ctx, deployment, testcaseEnvInst, appSource.CrName, appSource.CrKind, appSource.CrAppSourceName, appSource.CrAppList, enterpriseApi.PhaseInstall, testenv.AppInstallTimeout)
+ Expect(err).To(Succeed(), "Timed out waiting for apps to reach Install phase on %s", appSource.CrName)
+ }
clusterManagerBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "")
// Verify no pods reset by checking the pod age
diff --git a/test/appframework_gcp/m4/appframework_gcs_test.go b/test/appframework_gcp/m4/appframework_gcs_test.go
index e7276d4f6..0216e1f4c 100644
--- a/test/appframework_gcp/m4/appframework_gcs_test.go
+++ b/test/appframework_gcp/m4/appframework_gcs_test.go
@@ -1140,7 +1140,10 @@ var _ = Describe("m4appfw test", func() {
err = deployment.UpdateCR(ctx, config)
Expect(err).To(Succeed(), "Unable to update config map")
- time.Sleep(2 * time.Minute)
+ // Wait for Monitoring Console to reach Ready phase instead of using time.Sleep
+ err = testenv.WaitForMonitoringConsolePhase(ctx, deployment, testcaseEnvInst.GetName(), mc.Name, enterpriseApi.PhaseReady, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for MonitoringConsole to reach Ready phase")
+
// Verify Monitoring Console is ready and stays in ready state
testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst)
@@ -1710,7 +1713,10 @@ var _ = Describe("m4appfw test", func() {
// Ensure Search Head Cluster go to Ready phase
testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst)
- time.Sleep(60 * time.Second)
+ // Wait for SearchHeadCluster to reach Ready phase instead of using time.Sleep
+ err = testenv.WaitForSearchHeadClusterPhase(ctx, deployment, testcaseEnvInst.GetName(), shc.Name, enterpriseApi.PhaseReady, 60*time.Second)
+ Expect(err).To(Succeed(), "Timed out waiting for SearchHeadCluster to reach Ready phase")
+
// Wait for polling interval to pass
testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName()+"-shc", shc.Kind, appSourceNameShc, appFileList)
@@ -2278,9 +2284,10 @@ var _ = Describe("m4appfw test", func() {
err = deployment.UpdateCR(ctx, config)
Expect(err).To(Succeed(), "Unable to update config map for Search Head Cluster")
- // Wait 5 seconds to be sure reconcile caused by CR update and config map update are done
- testcaseEnvInst.Log.Info("Wait 5 seconds to be sure reconcile caused by CR update and config map update are done")
- time.Sleep(5 * time.Second)
+ // Wait for ClusterManager to reach Ready phase instead of using time.Sleep
+ testcaseEnvInst.Log.Info("Wait for ClusterManager to reach Ready phase")
+ err = testenv.WaitForClusterManagerPhase(ctx, deployment, testcaseEnvInst.GetName(), cm.Name, enterpriseApi.PhaseReady, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for ClusterManager to reach Ready phase")
// Verify status is 'ON' in config map for Cluster Master and Search Head Cluster
testcaseEnvInst.Log.Info("Verify status is 'ON' in config map for Cluster Master and Search Head Cluster")
diff --git a/test/appframework_gcp/m4/manager_appframework_test.go b/test/appframework_gcp/m4/manager_appframework_test.go
index 24730de60..b0c9d318d 100644
--- a/test/appframework_gcp/m4/manager_appframework_test.go
+++ b/test/appframework_gcp/m4/manager_appframework_test.go
@@ -1139,7 +1139,10 @@ var _ = Describe("m4appfw test", func() {
err = deployment.UpdateCR(ctx, config)
Expect(err).To(Succeed(), "Unable to update config map")
- time.Sleep(2 * time.Minute)
+ // Wait for Monitoring Console to reach Ready phase instead of using time.Sleep
+ err = testenv.WaitForMonitoringConsolePhase(ctx, deployment, testcaseEnvInst.GetName(), mc.Name, enterpriseApi.PhaseReady, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for MonitoringConsole to reach Ready phase")
+
// Verify Monitoring Console is ready and stays in ready state
testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst)
@@ -1709,7 +1712,9 @@ var _ = Describe("m4appfw test", func() {
// Ensure Search Head Cluster go to Ready phase
testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst)
- time.Sleep(60 * time.Second)
+ // Wait for SearchHeadCluster to reach Ready phase instead of using time.Sleep
+ err = testenv.WaitForSearchHeadClusterPhase(ctx, deployment, testcaseEnvInst.GetName(), shc.Name, enterpriseApi.PhaseReady, 60*time.Second)
+ Expect(err).To(Succeed(), "Timed out waiting for SearchHeadCluster to reach Ready phase")
// Wait for polling interval to pass
testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName()+"-shc", shc.Kind, appSourceNameShc, appFileList)
@@ -2277,9 +2282,10 @@ var _ = Describe("m4appfw test", func() {
err = deployment.UpdateCR(ctx, config)
Expect(err).To(Succeed(), "Unable to update config map for Search Head Cluster")
- // Wait 5 seconds to be sure reconcile caused by CR update and config map update are done
- testcaseEnvInst.Log.Info("Wait 5 seconds to be sure reconcile caused by CR update and config map update are done")
- time.Sleep(5 * time.Second)
+ // Wait for ClusterManager to reach Ready phase instead of using time.Sleep
+ testcaseEnvInst.Log.Info("Wait for ClusterManager and SearchHeadCluster to reach Ready phase")
+ err = testenv.WaitForClusterManagerPhase(ctx, deployment, testcaseEnvInst.GetName(), cm.Name, enterpriseApi.PhaseReady, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for ClusterManager to reach Ready phase")
// Verify status is 'ON' in config map for Cluster Manager and Search Head Cluster
testcaseEnvInst.Log.Info("Verify status is 'ON' in config map for Cluster Manager and Search Head Cluster")
diff --git a/test/appframework_gcp/s1/appframework_gcs_test.go b/test/appframework_gcp/s1/appframework_gcs_test.go
index 2f23abb3f..cc4533977 100644
--- a/test/appframework_gcp/s1/appframework_gcs_test.go
+++ b/test/appframework_gcp/s1/appframework_gcs_test.go
@@ -30,6 +30,7 @@ import (
"github.com/splunk/splunk-operator/pkg/splunk/enterprise"
testenv "github.com/splunk/splunk-operator/test/testenv"
corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/wait"
)
var _ = Describe("s1appfw test", func() {
@@ -1370,17 +1371,10 @@ var _ = Describe("s1appfw test", func() {
// ############ Verify livenessProbe and readinessProbe config object and scripts############
testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe")
ConfigMapName := enterprise.GetProbeConfigMapName(testcaseEnvInst.GetName())
- _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName)
- if err != nil {
- for i := 1; i < 10; i++ {
- _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName)
- if err == nil {
- continue
- } else {
- time.Sleep(1 * time.Second)
- }
- }
- }
+ err = wait.PollUntilContextTimeout(ctx, testenv.PollInterval, 10*time.Second, true, func(ctx context.Context) (bool, error) {
+ _, getErr := testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName)
+ return getErr == nil, nil
+ })
Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName)
// Verify App installation is in progress on Standalone
diff --git a/test/ingest_search/ingest_search_test.go b/test/ingest_search/ingest_search_test.go
index dfa686ad3..5a970bcd6 100644
--- a/test/ingest_search/ingest_search_test.go
+++ b/test/ingest_search/ingest_search_test.go
@@ -206,8 +206,10 @@ var _ = Describe("Ingest and Search Test", func() {
searchString := fmt.Sprintf("index=%s | stats count by host", indexName)
- // Wait for ingestion lag prior to searching
- time.Sleep(2 * time.Second)
+ // Wait for search results to be available instead of fixed sleep
+ err = testenv.WaitForSearchResultsNonEmpty(ctx, deployment, podName, searchString, 30*time.Second)
+ Expect(err).To(Succeed(), "Timed out waiting for search results")
+
searchResultsResp, err := testenv.PerformSearchSync(ctx, podName, searchString, deployment)
Expect(err).To(Succeed(), "Failed to execute search '%s' on pod %s", podName, searchString)
diff --git a/test/licensemanager/manager_lm_c3_test.go b/test/licensemanager/manager_lm_c3_test.go
index a977967ce..230257852 100644
--- a/test/licensemanager/manager_lm_c3_test.go
+++ b/test/licensemanager/manager_lm_c3_test.go
@@ -327,10 +327,11 @@ var _ = Describe("Licensemanager test", func() {
uploadedApps = append(uploadedApps, uploadedFiles...)
}
- // Wait for the poll period for the apps to be downloaded
- time.Sleep(2 * time.Minute)
+ // Wait for LM to be in READY status (polls for state instead of fixed sleep)
+ err = testenv.WaitForLicenseManagerPhase(ctx, deployment, testcaseEnvInst.GetName(), deployment.GetName(), enterpriseApi.PhaseReady, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for LicenseManager to reach Ready phase")
- // Wait for LM to be in READY status
+ // Verify LM stays in ready state
testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst)
// Verify apps are copied at the correct location on LM (/etc/apps/)
diff --git a/test/licensemaster/lm_c3_test.go b/test/licensemaster/lm_c3_test.go
index 1952ebe29..92fe65713 100644
--- a/test/licensemaster/lm_c3_test.go
+++ b/test/licensemaster/lm_c3_test.go
@@ -324,10 +324,11 @@ var _ = Describe("licensemaster test", func() {
uploadedApps = append(uploadedApps, uploadedFiles...)
}
- // Wait for the poll period for the apps to be downloaded
- time.Sleep(2 * time.Minute)
+ // Wait for LM to reach Ready phase (polls for state instead of fixed sleep)
+ err = testenv.WaitForLicenseMasterPhase(ctx, deployment, testcaseEnvInst.GetName(), deployment.GetName(), enterpriseApi.PhaseReady, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for LicenseMaster to reach Ready phase")
- // Wait for LM to be in READY status
+ // Verify LM stays in ready state
testenv.LicenseMasterReady(ctx, deployment, testcaseEnvInst)
// Verify apps are copied at the correct location on LM (/etc/apps/)
diff --git a/test/monitoring_console/manager_monitoring_console_test.go b/test/monitoring_console/manager_monitoring_console_test.go
index 7e50c58a6..fb97bbea6 100644
--- a/test/monitoring_console/manager_monitoring_console_test.go
+++ b/test/monitoring_console/manager_monitoring_console_test.go
@@ -468,10 +468,9 @@ var _ = Describe("Monitoring Console test", func() {
// Verify Monitoring Console is Ready and stays in ready state
testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst)
- time.Sleep(60 * time.Second)
-
- // Check Cluster Manager in Monitoring Console Config Map
- testenv.VerifyPodsInMCConfigMap(ctx, deployment, testcaseEnvInst, []string{fmt.Sprintf(testenv.ClusterManagerServiceName, deployment.GetName())}, splcommon.ClusterManagerURL, mcName, true)
+ // Wait for Cluster Manager to appear in Monitoring Console Config Map
+ err = testenv.WaitForPodsInMCConfigMap(ctx, deployment, testcaseEnvInst, []string{fmt.Sprintf(testenv.ClusterManagerServiceName, deployment.GetName())}, splcommon.ClusterManagerURL, mcName, true, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for Cluster Manager in MC ConfigMap")
// Check Deployer in Monitoring Console Config Map
testenv.VerifyPodsInMCConfigMap(ctx, deployment, testcaseEnvInst, []string{fmt.Sprintf(testenv.DeployerServiceName, deployment.GetName())}, "SPLUNK_DEPLOYER_URL", mcName, true)
@@ -480,11 +479,9 @@ var _ = Describe("Monitoring Console test", func() {
shPods := testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), defaultSHReplicas, false, 0)
testenv.VerifyPodsInMCConfigMap(ctx, deployment, testcaseEnvInst, shPods, "SPLUNK_SEARCH_HEAD_URL", mcName, true)
- // Add a sleep here in case MC pod restarts to add peers
- time.Sleep(300 * time.Second)
-
- // Check Monitoring console Pod is configured with all search head
- testenv.VerifyPodsInMCConfigString(ctx, deployment, testcaseEnvInst, shPods, mcName, true, false)
+ // Wait for Monitoring console Pod to be configured with all search head
+ err = testenv.WaitForPodsInMCConfigString(ctx, deployment, testcaseEnvInst, shPods, mcName, true, false, 5*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for search heads in MC config")
// Check Monitoring console is configured with all Indexer in Name Space
indexerPods := testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), defaultIndexerReplicas, false, 0)
@@ -661,11 +658,9 @@ var _ = Describe("Monitoring Console test", func() {
// Verify Monitoring Console is Ready and stays in ready state
testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst)
- // Adding a sleep, in case MC restarts to update peers list
- time.Sleep(300 * time.Second)
-
- // Check Monitoring console Pod is configured with all search head
- testenv.VerifyPodsInMCConfigString(ctx, deployment, testcaseEnvInst, shPods, mcName, true, false)
+ // Wait for Monitoring console Pod to be configured with all search head
+ err = testenv.WaitForPodsInMCConfigString(ctx, deployment, testcaseEnvInst, shPods, mcName, true, false, 5*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for search heads in MC config")
// Check Monitoring console is configured with all Indexer in Name Space
indexerPods := testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), defaultIndexerReplicas, false, 0)
@@ -781,11 +776,13 @@ var _ = Describe("Monitoring Console test", func() {
testenv.VerifyPodsInMCConfigMap(ctx, deployment, testcaseEnvInst, shPods, "SPLUNK_SEARCH_HEAD_URL", mcTwoName, true)
testcaseEnvInst.Log.Info("Verify Search Head Pods on Monitoring Console Pod after SHC Reconfig")
- testenv.VerifyPodsInMCConfigString(ctx, deployment, testcaseEnvInst, shPods, mcTwoName, true, false)
+ err = testenv.WaitForPodsInMCConfigString(ctx, deployment, testcaseEnvInst, shPods, mcTwoName, true, false, 5*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for search heads in MC two config after SHC reconfig")
// Check Monitoring console Two is configured with all Indexer in Name Space
testcaseEnvInst.Log.Info("Checking for Indexer Pod on MC TWO after SHC Reconfig")
- testenv.VerifyPodsInMCConfigString(ctx, deployment, testcaseEnvInst, indexerPods, mcTwoName, true, true)
+ err = testenv.WaitForPodsInMCConfigString(ctx, deployment, testcaseEnvInst, indexerPods, mcTwoName, true, true, 5*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for indexers in MC two config after SHC reconfig")
// ############################ VERIFICATION FOR MONITORING CONSOLE ONE POST SHC RECONFIG ###############################
@@ -805,7 +802,8 @@ var _ = Describe("Monitoring Console test", func() {
testenv.VerifyPodsInMCConfigMap(ctx, deployment, testcaseEnvInst, shPods, "SPLUNK_SEARCH_HEAD_URL", mcName, false)
testcaseEnvInst.Log.Info("Verify Search Head Pods NOT on Monitoring Console ONE Pod after Search Head Reconfig")
- testenv.VerifyPodsInMCConfigString(ctx, deployment, testcaseEnvInst, shPods, mcName, false, false)
+ err = testenv.WaitForPodsInMCConfigString(ctx, deployment, testcaseEnvInst, shPods, mcName, false, false, 5*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for search heads to be removed from MC one config after SHC reconfig")
// Check Monitoring console One is Not configured with all Indexer in Name Space
// CSPL-619
diff --git a/test/monitoring_console/monitoring_console_test.go b/test/monitoring_console/monitoring_console_test.go
index 4189e9ff1..3c4e858fe 100644
--- a/test/monitoring_console/monitoring_console_test.go
+++ b/test/monitoring_console/monitoring_console_test.go
@@ -107,10 +107,9 @@ var _ = Describe("Monitoring Console test", func() {
// Verify Monitoring Console is Ready and stays in ready state
testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst)
- time.Sleep(60 * time.Second)
-
- // Check Cluster Master in Monitoring Console Config Map
- testenv.VerifyPodsInMCConfigMap(ctx, deployment, testcaseEnvInst, []string{fmt.Sprintf(testenv.ClusterMasterServiceName, deployment.GetName())}, "SPLUNK_CLUSTER_MASTER_URL", mcName, true)
+ // Wait for Cluster Master to appear in Monitoring Console Config Map
+ err = testenv.WaitForPodsInMCConfigMap(ctx, deployment, testcaseEnvInst, []string{fmt.Sprintf(testenv.ClusterMasterServiceName, deployment.GetName())}, "SPLUNK_CLUSTER_MASTER_URL", mcName, true, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for Cluster Master in MC ConfigMap")
// Check Deployer in Monitoring Console Config Map
testenv.VerifyPodsInMCConfigMap(ctx, deployment, testcaseEnvInst, []string{fmt.Sprintf(testenv.DeployerServiceName, deployment.GetName())}, "SPLUNK_DEPLOYER_URL", mcName, true)
diff --git a/test/secret/secret_c3_test.go b/test/secret/secret_c3_test.go
index 698c84786..39dfbbfa1 100644
--- a/test/secret/secret_c3_test.go
+++ b/test/secret/secret_c3_test.go
@@ -16,6 +16,7 @@ package secret
import (
"context"
"fmt"
+ "time"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
@@ -103,6 +104,11 @@ var _ = Describe("Secret Test for SVA C3", func() {
// Ensure Indexers go to Ready phase
testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst)
+ // Wait for ClusterInitialized event to confirm cluster is fully initialized
+ idxcName := deployment.GetName() + "-idxc"
+ err = testenv.WaitForClusterInitialized(ctx, deployment, testcaseEnvInst.GetName(), idxcName, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for ClusterInitialized event on IndexerCluster")
+
// Deploy Monitoring Console CRD
mc, err := deployment.DeployMonitoringConsole(ctx, deployment.GetName(), deployment.GetName())
Expect(err).To(Succeed(), "Unable to deploy Monitoring Console One instance")
@@ -143,9 +149,18 @@ var _ = Describe("Secret Test for SVA C3", func() {
// Ensure Search Head Cluster go to Ready phase
testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst)
+ // Wait for PasswordSyncCompleted event on SearchHeadCluster
+ shcName := deployment.GetName() + "-shc"
+ err = testenv.WaitForPasswordSyncCompleted(ctx, deployment, testcaseEnvInst.GetName(), shcName, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for PasswordSyncCompleted event on SearchHeadCluster")
+
// Ensure Indexers go to Ready phase
testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst)
+ // Wait for PasswordSyncCompleted event on IndexerCluster
+ err = testenv.WaitForPasswordSyncCompleted(ctx, deployment, testcaseEnvInst.GetName(), idxcName, 2*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for PasswordSyncCompleted event on IndexerCluster")
+
// wait for custom resource resource version to change
testenv.VerifyCustomResourceVersionChanged(ctx, deployment, testcaseEnvInst, mc, resourceVersion)
diff --git a/test/smartstore/manager_smartstore_test.go b/test/smartstore/manager_smartstore_test.go
index b90a68337..5374ae532 100644
--- a/test/smartstore/manager_smartstore_test.go
+++ b/test/smartstore/manager_smartstore_test.go
@@ -68,8 +68,11 @@ var _ = Describe("Smartstore test", func() {
standalone, err := deployment.DeployStandaloneWithGivenSmartStoreSpec(ctx, deployment.GetName(), smartStoreSpec)
Expect(err).To(Succeed(), "Unable to deploy standalone instance ")
- time.Sleep(1 * time.Minute)
- // Verify standalone goes to ready state
+ // Wait for Standalone to reach Ready phase
+ err = testenv.WaitForStandalonePhase(ctx, deployment, testcaseEnvInst.GetName(), standalone.Name, enterpriseApi.PhaseReady, 5*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for Standalone to reach Ready phase")
+
+ // Verify standalone goes to ready state and stays ready
testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst)
// Check index on pod
diff --git a/test/smartstore/smartstore_test.go b/test/smartstore/smartstore_test.go
index c2d550411..8ec05053b 100644
--- a/test/smartstore/smartstore_test.go
+++ b/test/smartstore/smartstore_test.go
@@ -69,8 +69,11 @@ var _ = Describe("Smartstore test", func() {
standalone, err := deployment.DeployStandaloneWithGivenSmartStoreSpec(ctx, deployment.GetName(), smartStoreSpec)
Expect(err).To(Succeed(), "Unable to deploy standalone instance ")
- time.Sleep(1 * time.Minute)
- // Verify standalone goes to ready state
+ // Wait for Standalone to reach Ready phase
+ err = testenv.WaitForStandalonePhase(ctx, deployment, testcaseEnvInst.GetName(), standalone.Name, enterpriseApi.PhaseReady, 5*time.Minute)
+ Expect(err).To(Succeed(), "Timed out waiting for Standalone to reach Ready phase")
+
+ // Verify standalone goes to ready state and stays ready
testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst)
// Check index on pod
diff --git a/test/testenv/appframework_utils.go b/test/testenv/appframework_utils.go
index e9879679b..7cb4316dc 100644
--- a/test/testenv/appframework_utils.go
+++ b/test/testenv/appframework_utils.go
@@ -11,6 +11,7 @@ import (
splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
corev1 "k8s.io/api/core/v1"
+ wait "k8s.io/apimachinery/pkg/util/wait"
enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
@@ -168,20 +169,17 @@ func GetPodAppInstallStatus(ctx context.Context, deployment *Deployment, podName
stdin := fmt.Sprintf("/opt/splunk/bin/splunk display app '%s' -auth admin:$(cat /mnt/splunk-secrets/password)", appname)
command := []string{"/bin/sh"}
var stdout, stderr string
- var err error
- for i := 0; i < 10; i++ {
- stdout, stderr, err = deployment.PodExecCommand(ctx, podName, command, stdin, false)
- if err == nil {
- continue
- } else if err != nil && i == 9 {
- logf.Log.Error(err, "Failed to execute command on pod", "pod", podName, "command", command, "stdin", stdin)
- return "", err
- } else {
- time.Sleep(1 * time.Second)
- }
+ err := wait.PollUntilContextTimeout(ctx, PollInterval, 10*time.Second, true, func(ctx context.Context) (bool, error) {
+ var execErr error
+ stdout, stderr, execErr = deployment.PodExecCommand(ctx, podName, command, stdin, false)
+ return execErr == nil, nil
+ })
+ if err != nil {
+ logf.Log.Error(err, "Failed to execute command on pod", "pod", podName, "command", command, "stdin", stdin, "stderr", stderr)
+ return "", err
}
- logf.Log.Info("Command executed", "on pod", podName, "command", command, "stdin", stdin, "stdout", stdout, "stderr", stderr)
+ logf.Log.Info("Command executed", "on pod", podName, "command", command, "stdin", stdin, "stdout", stdout)
return strings.TrimSuffix(stdout, "\n"), nil
}
@@ -427,22 +425,27 @@ func GenerateAppFrameworkSpec(ctx context.Context, testenvInstance *TestCaseEnv,
return appFrameworkSpec
}
-// WaitforPhaseChange Wait for 2 mins or when phase change on is seen on a CR for any particular app
+// WaitforPhaseChange Wait for timeout or when phase change is seen on a CR for any particular app
+// Deprecated: Use WaitForAppPhaseChange instead for better timeout control
func WaitforPhaseChange(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv, name string, crKind string, appSourceName string, appList []string) {
- startTime := time.Now()
+ _ = WaitForAppPhaseChange(ctx, deployment, testenvInstance, name, crKind, appSourceName, appList, 2*time.Minute)
+}
- for time.Since(startTime) <= time.Duration(2*time.Minute) {
+// WaitForAppPhaseChange waits for any app in the list to change from PhaseInstall to another phase
+func WaitForAppPhaseChange(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv, name string, crKind string, appSourceName string, appList []string, timeout time.Duration) error {
+ return wait.PollUntilContextTimeout(ctx, PollInterval, timeout, true, func(ctx context.Context) (bool, error) {
for _, appName := range appList {
appDeploymentInfo, err := GetAppDeploymentInfo(ctx, deployment, testenvInstance, name, crKind, appSourceName, appName)
if err != nil {
testenvInstance.Log.Error(err, "Failed to get app deployment info")
+ continue
}
if appDeploymentInfo.PhaseInfo.Phase != enterpriseApi.PhaseInstall {
- return
+ return true, nil
}
}
- time.Sleep(1 * time.Second)
- }
+ return false, nil
+ })
}
// AppFrameWorkVerifications will perform several verifications needed between the different steps of App Framework tests
diff --git a/test/testenv/testcaseenv.go b/test/testenv/testcaseenv.go
index cb3c8a107..4e0ccb3c4 100644
--- a/test/testenv/testcaseenv.go
+++ b/test/testenv/testcaseenv.go
@@ -215,6 +215,7 @@ func (testenv *TestCaseEnv) popCleanupFunc() (cleanupFunc, error) {
}
func (testenv *TestCaseEnv) createNamespace() error {
+ ctx := context.Background()
namespace := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
@@ -222,14 +223,15 @@ func (testenv *TestCaseEnv) createNamespace() error {
},
}
- err := testenv.GetKubeClient().Create(context.TODO(), namespace)
+ err := testenv.GetKubeClient().Create(ctx, namespace)
if err != nil {
return err
}
// Cleanup the namespace when we teardown this testenv
testenv.pushCleanupFunc(func() error {
- err := testenv.GetKubeClient().Delete(context.TODO(), namespace)
+ cleanupCtx := context.Background()
+ err := testenv.GetKubeClient().Delete(cleanupCtx, namespace)
if err != nil {
testenv.Log.Error(err, "Unable to delete namespace")
return err
@@ -237,7 +239,7 @@ func (testenv *TestCaseEnv) createNamespace() error {
if err = wait.PollImmediate(PollInterval, DefaultTimeout, func() (bool, error) {
key := client.ObjectKey{Name: testenv.namespace, Namespace: testenv.namespace}
ns := &corev1.Namespace{}
- err := testenv.GetKubeClient().Get(context.TODO(), key, ns)
+ err := testenv.GetKubeClient().Get(cleanupCtx, key, ns)
if errors.IsNotFound(err) {
return true, nil
}
@@ -257,7 +259,7 @@ func (testenv *TestCaseEnv) createNamespace() error {
if err := wait.PollImmediate(PollInterval, DefaultTimeout, func() (bool, error) {
key := client.ObjectKey{Name: testenv.namespace}
ns := &corev1.Namespace{}
- err := testenv.GetKubeClient().Get(context.TODO(), key, ns)
+ err := testenv.GetKubeClient().Get(ctx, key, ns)
if err != nil {
// Try again
if errors.IsNotFound(err) {
@@ -279,6 +281,7 @@ func (testenv *TestCaseEnv) createNamespace() error {
}
func (testenv *TestCaseEnv) createSA() error {
+ ctx := context.Background()
sa := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: testenv.serviceAccountName,
@@ -286,14 +289,14 @@ func (testenv *TestCaseEnv) createSA() error {
},
}
- err := testenv.GetKubeClient().Create(context.TODO(), sa)
+ err := testenv.GetKubeClient().Create(ctx, sa)
if err != nil {
testenv.Log.Error(err, "Unable to create service account")
return err
}
testenv.pushCleanupFunc(func() error {
- err := testenv.GetKubeClient().Delete(context.TODO(), sa)
+ err := testenv.GetKubeClient().Delete(context.Background(), sa)
if err != nil {
testenv.Log.Error(err, "Unable to delete service account")
return err
@@ -305,16 +308,17 @@ func (testenv *TestCaseEnv) createSA() error {
}
func (testenv *TestCaseEnv) createRole() error {
+ ctx := context.Background()
role := newRole(testenv.roleName, testenv.namespace)
- err := testenv.GetKubeClient().Create(context.TODO(), role)
+ err := testenv.GetKubeClient().Create(ctx, role)
if err != nil {
testenv.Log.Error(err, "Unable to create role")
return err
}
testenv.pushCleanupFunc(func() error {
- err := testenv.GetKubeClient().Delete(context.TODO(), role)
+ err := testenv.GetKubeClient().Delete(context.Background(), role)
if err != nil {
testenv.Log.Error(err, "Unable to delete role")
return err
@@ -326,16 +330,17 @@ func (testenv *TestCaseEnv) createRole() error {
}
func (testenv *TestCaseEnv) createRoleBinding() error {
+ ctx := context.Background()
binding := newRoleBinding(testenv.roleBindingName, testenv.serviceAccountName, testenv.namespace, testenv.roleName)
- err := testenv.GetKubeClient().Create(context.TODO(), binding)
+ err := testenv.GetKubeClient().Create(ctx, binding)
if err != nil {
testenv.Log.Error(err, "Unable to create rolebinding")
return err
}
testenv.pushCleanupFunc(func() error {
- err := testenv.GetKubeClient().Delete(context.TODO(), binding)
+ err := testenv.GetKubeClient().Delete(context.Background(), binding)
if err != nil {
testenv.Log.Error(err, "Unable to delete rolebinding")
return err
@@ -347,6 +352,7 @@ func (testenv *TestCaseEnv) createRoleBinding() error {
}
func (testenv *TestCaseEnv) attachPVCToOperator(name string) error {
+ ctx := context.Background()
var err error
// volume name which refers to PVC to be attached
@@ -354,7 +360,7 @@ func (testenv *TestCaseEnv) attachPVCToOperator(name string) error {
namespacedName := client.ObjectKey{Name: testenv.operatorName, Namespace: testenv.namespace}
operator := &appsv1.Deployment{}
- err = testenv.GetKubeClient().Get(context.TODO(), namespacedName, operator)
+ err = testenv.GetKubeClient().Get(ctx, namespacedName, operator)
if err != nil {
testenv.Log.Error(err, "Unable to get operator", "operator name", testenv.operatorName)
return err
@@ -379,7 +385,7 @@ func (testenv *TestCaseEnv) attachPVCToOperator(name string) error {
operator.Spec.Template.Spec.Containers[0].VolumeMounts = append(operator.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount)
// update the operator deployment now
- err = testenv.GetKubeClient().Update(context.TODO(), operator)
+ err = testenv.GetKubeClient().Update(ctx, operator)
if err != nil {
testenv.Log.Error(err, "Unable to update operator", "operator name", testenv.operatorName)
return err
@@ -389,9 +395,10 @@ func (testenv *TestCaseEnv) attachPVCToOperator(name string) error {
}
func (testenv *TestCaseEnv) createOperator() error {
+ ctx := context.Background()
//op := newOperator(testenv.operatorName, testenv.namespace, testenv.serviceAccountName, testenv.operatorImage, testenv.splunkImage, "nil")
op := newOperator(testenv.operatorName, testenv.namespace, testenv.serviceAccountName, testenv.operatorImage, testenv.splunkImage)
- err := testenv.GetKubeClient().Create(context.TODO(), op)
+ err := testenv.GetKubeClient().Create(ctx, op)
if err != nil {
testenv.Log.Error(err, "Unable to create operator")
return err
@@ -403,7 +410,7 @@ func (testenv *TestCaseEnv) createOperator() error {
testenv.Log.Error(err, "Unable to create PVC", "pvcName", pvc.ObjectMeta.Name)
return err
}
- err = testenv.GetKubeClient().Create(context.TODO(), pvc)
+ err = testenv.GetKubeClient().Create(ctx, pvc)
if err != nil {
testenv.Log.Error(err, "Unable to create PVC")
return err
@@ -417,7 +424,7 @@ func (testenv *TestCaseEnv) createOperator() error {
}
testenv.pushCleanupFunc(func() error {
- err := testenv.GetKubeClient().Delete(context.TODO(), op)
+ err := testenv.GetKubeClient().Delete(context.Background(), op)
if err != nil {
testenv.Log.Error(err, "Unable to delete operator")
return err
@@ -425,11 +432,10 @@ func (testenv *TestCaseEnv) createOperator() error {
return nil
})
- OperatorInstallationTimeout := 5 * time.Minute
- if err := wait.PollImmediate(PollInterval, OperatorInstallationTimeout, func() (bool, error) {
+ if err := wait.PollImmediate(PollInterval, DefaultTimeout, func() (bool, error) {
key := client.ObjectKey{Name: testenv.operatorName, Namespace: testenv.namespace}
deployment := &appsv1.Deployment{}
- err := testenv.GetKubeClient().Get(context.TODO(), key, deployment)
+ err := testenv.GetKubeClient().Get(ctx, key, deployment)
if err != nil {
testenv.Log.Error(err, "operator not found waiting")
return false, nil
@@ -461,6 +467,7 @@ func (testenv *TestCaseEnv) CreateLicenseConfigMap(path string) error {
}
func (testenv *TestCaseEnv) createLicenseConfigMap() error {
+ ctx := context.Background()
lic, err := newLicenseConfigMap(testenv.licenseCMName, testenv.namespace, testenv.licenseFilePath)
if err != nil {
return err
@@ -468,7 +475,7 @@ func (testenv *TestCaseEnv) createLicenseConfigMap() error {
// Check if config map already exists
key := client.ObjectKey{Name: testenv.namespace, Namespace: testenv.namespace}
- err = testenv.GetKubeClient().Get(context.TODO(), key, lic)
+ err = testenv.GetKubeClient().Get(ctx, key, lic)
if err != nil {
testenv.Log.Info("No Existing license config map not found. Creating a new License Configmap", "Name", testenv.namespace)
@@ -478,7 +485,7 @@ func (testenv *TestCaseEnv) createLicenseConfigMap() error {
}
// Create a new licese config map
- err = testenv.GetKubeClient().Create(context.TODO(), lic)
+ err = testenv.GetKubeClient().Create(ctx, lic)
if err != nil {
testenv.Log.Error(err, "Unable to create license configmap")
return err
@@ -487,7 +494,7 @@ func (testenv *TestCaseEnv) createLicenseConfigMap() error {
testenv.Log.Info("New License Config Map created.", "License Config Map Name", testenv.namespace)
testenv.pushCleanupFunc(func() error {
- err := testenv.GetKubeClient().Delete(context.TODO(), lic)
+ err := testenv.GetKubeClient().Delete(context.Background(), lic)
if err != nil {
testenv.Log.Error(err, "Unable to delete license configmap ")
return err
@@ -500,14 +507,15 @@ func (testenv *TestCaseEnv) createLicenseConfigMap() error {
// CreateServiceAccount Create a service account with given name
func (testenv *TestCaseEnv) CreateServiceAccount(name string) error {
+ ctx := context.Background()
serviceAccountConfig := newServiceAccount(testenv.namespace, name)
- if err := testenv.GetKubeClient().Create(context.TODO(), serviceAccountConfig); err != nil {
+ if err := testenv.GetKubeClient().Create(ctx, serviceAccountConfig); err != nil {
testenv.Log.Error(err, "Unable to create service account")
return err
}
testenv.pushCleanupFunc(func() error {
- err := testenv.GetKubeClient().Delete(context.TODO(), serviceAccountConfig)
+ err := testenv.GetKubeClient().Delete(context.Background(), serviceAccountConfig)
if err != nil {
testenv.Log.Error(err, "Unable to delete service account")
return err
@@ -519,6 +527,7 @@ func (testenv *TestCaseEnv) CreateServiceAccount(name string) error {
// CreateIndexSecret create secret object
func (testenv *TestCaseEnv) createIndexSecret() error {
+ ctx := context.Background()
secretName := testenv.s3IndexSecret
ns := testenv.namespace
@@ -534,13 +543,13 @@ func (testenv *TestCaseEnv) createIndexSecret() error {
data := map[string][]byte{"s3_access_key": []byte(accessKey),
"s3_secret_key": []byte(secretKey)}
secret := newSecretSpec(ns, secretName, data)
- if err := testenv.GetKubeClient().Create(context.TODO(), secret); err != nil {
+ if err := testenv.GetKubeClient().Create(ctx, secret); err != nil {
testenv.Log.Error(err, "Unable to create s3 index secret object")
return err
}
testenv.pushCleanupFunc(func() error {
- err := testenv.GetKubeClient().Delete(context.TODO(), secret)
+ err := testenv.GetKubeClient().Delete(context.Background(), secret)
if err != nil {
testenv.Log.Error(err, "Unable to delete s3 index secret object")
return err
@@ -552,6 +561,7 @@ func (testenv *TestCaseEnv) createIndexSecret() error {
// CreateIndexSecret create secret object
func (testenv *TestCaseEnv) createIndexSecretGCP() error {
+ ctx := context.Background()
secretName := testenv.s3IndexSecret
ns := testenv.namespace
encodedString := os.Getenv("GCP_SERVICE_ACCOUNT_KEY")
@@ -562,13 +572,13 @@ func (testenv *TestCaseEnv) createIndexSecretGCP() error {
}
data := map[string][]byte{"key.json": []byte(gcpCredentials)}
secret := newSecretSpec(ns, secretName, data)
- if err := testenv.GetKubeClient().Create(context.TODO(), secret); err != nil {
+ if err := testenv.GetKubeClient().Create(ctx, secret); err != nil {
testenv.Log.Error(err, "Unable to create GCP index secret object")
return err
}
testenv.pushCleanupFunc(func() error {
- err := testenv.GetKubeClient().Delete(context.TODO(), secret)
+ err := testenv.GetKubeClient().Delete(context.Background(), secret)
if err != nil {
testenv.Log.Error(err, "Unable to delete GCP index secret object")
return err
@@ -580,18 +590,19 @@ func (testenv *TestCaseEnv) createIndexSecretGCP() error {
// createIndexSecretAzure create secret object for Azure
func (testenv *TestCaseEnv) createIndexSecretAzure() error {
+ ctx := context.Background()
secretName := testenv.s3IndexSecret
ns := testenv.namespace
data := map[string][]byte{"azure_sa_name": []byte(os.Getenv("STORAGE_ACCOUNT")),
"azure_sa_secret_key": []byte(os.Getenv("STORAGE_ACCOUNT_KEY"))}
secret := newSecretSpec(ns, secretName, data)
- if err := testenv.GetKubeClient().Create(context.TODO(), secret); err != nil {
+ if err := testenv.GetKubeClient().Create(ctx, secret); err != nil {
testenv.Log.Error(err, "Unable to create Azure index secret object")
return err
}
testenv.pushCleanupFunc(func() error {
- err := testenv.GetKubeClient().Delete(context.TODO(), secret)
+ err := testenv.GetKubeClient().Delete(context.Background(), secret)
if err != nil {
testenv.Log.Error(err, "Unable to delete Azure index secret object")
return err
@@ -603,6 +614,7 @@ func (testenv *TestCaseEnv) createIndexSecretAzure() error {
// CreateIndexIngestSepSecret creates secret object
func (testenv *TestCaseEnv) createIndexIngestSepSecret() error {
+ ctx := context.Background()
secretName := testenv.indexIngestSepSecret
ns := testenv.namespace
@@ -610,13 +622,13 @@ func (testenv *TestCaseEnv) createIndexIngestSepSecret() error {
"s3_secret_key": []byte(os.Getenv("AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY"))}
secret := newSecretSpec(ns, secretName, data)
- if err := testenv.GetKubeClient().Create(context.TODO(), secret); err != nil {
+ if err := testenv.GetKubeClient().Create(ctx, secret); err != nil {
testenv.Log.Error(err, "Unable to create index and ingestion sep secret object")
return err
}
testenv.pushCleanupFunc(func() error {
- err := testenv.GetKubeClient().Delete(context.TODO(), secret)
+ err := testenv.GetKubeClient().Delete(context.Background(), secret)
if err != nil {
testenv.Log.Error(err, "Unable to delete index and ingestion sep secret object")
return err
diff --git a/test/testenv/testenv.go b/test/testenv/testenv.go
index 06fe304d4..f95906e47 100644
--- a/test/testenv/testenv.go
+++ b/test/testenv/testenv.go
@@ -51,9 +51,12 @@ const (
// defaultTestTimeout is the max timeout in seconds before async test failed.
defaultTestTimeout = 1000000
- // PollInterval specifies the polling interval
+ // PollInterval specifies the polling interval for slow operations (waiting for full cluster readiness)
PollInterval = 5 * time.Second
+ // ShortPollInterval specifies the polling interval for fast-transitioning states
+ ShortPollInterval = 2 * time.Second
+
// ConsistentPollInterval is the interval to use to consistently check a state is stable
ConsistentPollInterval = 200 * time.Millisecond
@@ -63,6 +66,10 @@ const (
// DefaultTimeout is the max timeout before we failed.
DefaultTimeout = 2000 * time.Minute
+ // AppInstallTimeout is the timeout for waiting for apps to reach Install phase on a CR.
+ // C3 deployments require bundle push across all indexers and SHC deployer which can exceed 5 minutes.
+ AppInstallTimeout = 10 * time.Minute
+
// SearchHeadPod Template String for search head pod
SearchHeadPod = "splunk-%s-shc-search-head-%d"
diff --git a/test/testenv/verificationutils.go b/test/testenv/verificationutils.go
index cb611254d..9f093bfe4 100644
--- a/test/testenv/verificationutils.go
+++ b/test/testenv/verificationutils.go
@@ -22,12 +22,14 @@ import (
"fmt"
"math/rand"
"os/exec"
- "sigs.k8s.io/controller-runtime/pkg/client"
"strings"
"time"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
gomega "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
+ wait "k8s.io/apimachinery/pkg/util/wait"
enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
@@ -35,8 +37,6 @@ import (
logf "sigs.k8s.io/controller-runtime/pkg/log"
)
-var StabilizationDuration = time.Second * 20
-
// PodDetailsStruct captures output of kubectl get pods podname -o json
type PodDetailsStruct struct {
Spec struct {
@@ -85,9 +85,6 @@ func VerifyMonitoringConsoleReady(ctx context.Context, deployment *Deployment, m
return monitoringConsole.Status.Phase
}, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady))
- // Stabilization period
- time.Sleep(StabilizationDuration)
-
// In a steady state, we should stay in Ready and not flip-flop around
gomega.Consistently(func() enterpriseApi.Phase {
_ = deployment.GetInstance(ctx, mcName, monitoringConsole)
@@ -121,38 +118,16 @@ func StandaloneReady(ctx context.Context, deployment *Deployment, deploymentName
func SearchHeadClusterReady(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv) {
shc := &enterpriseApi.SearchHeadCluster{}
instanceName := fmt.Sprintf("%s-shc", deployment.GetName())
- gomega.Eventually(func() enterpriseApi.Phase {
- err := deployment.GetInstance(ctx, instanceName, shc)
- if err != nil {
- return enterpriseApi.PhaseError
- }
- testenvInstance.Log.Info("Waiting for Search head cluster phase to be ready", "instance", shc.ObjectMeta.Name, "Phase", shc.Status.Phase)
- DumpGetPods(testenvInstance.GetName())
-
- return shc.Status.Phase
- }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady))
-
- gomega.Eventually(func() enterpriseApi.Phase {
- err := deployment.GetInstance(ctx, instanceName, shc)
- if err != nil {
- return enterpriseApi.PhaseError
- }
- testenvInstance.Log.Info("Waiting for Deployer phase to be ready", "instance", shc.ObjectMeta.Name, "Phase", shc.Status.DeployerPhase)
- DumpGetPods(testenvInstance.GetName())
-
- return shc.Status.DeployerPhase
- }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady))
-
- gomega.Eventually(func() enterpriseApi.Phase {
+ gomega.Eventually(func() bool {
err := deployment.GetInstance(ctx, instanceName, shc)
if err != nil {
- return enterpriseApi.PhaseError
+ return false
}
- testenvInstance.Log.Info("Waiting for Search Head Cluster phase to be ready", "instance", shc.ObjectMeta.Name, "Phase", shc.Status.Phase)
+ testenvInstance.Log.Info("Waiting for Search Head Cluster and Deployer phase to be ready", "instance", shc.ObjectMeta.Name, "Phase", shc.Status.Phase, "DeployerPhase", shc.Status.DeployerPhase)
DumpGetPods(testenvInstance.GetName())
- return shc.Status.Phase
- }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady))
+ return shc.Status.Phase == enterpriseApi.PhaseReady && shc.Status.DeployerPhase == enterpriseApi.PhaseReady
+ }, deployment.GetTimeout(), PollInterval).Should(gomega.BeTrue())
// In a steady state, we should stay in Ready and not flip-flop around
gomega.Consistently(func() enterpriseApi.Phase {
@@ -337,7 +312,7 @@ func VerifyRFSFMet(ctx context.Context, deployment *Deployment, testenvInstance
rfSfStatus := CheckRFSF(ctx, deployment)
testenvInstance.Log.Info("Verifying RF SF is met", "Status", rfSfStatus)
return rfSfStatus
- }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(true))
+ }, deployment.GetTimeout(), ShortPollInterval).Should(gomega.Equal(true))
}
// VerifyNoDisconnectedSHPresentOnCM is present on cluster manager
@@ -355,7 +330,7 @@ func VerifyNoSHCInNamespace(deployment *Deployment, testenvInstance *TestCaseEnv
shcStatus := SHCInNamespace(testenvInstance.GetName())
testenvInstance.Log.Info("Verifying no Search Head Cluster is present in namespace", "Status", shcStatus)
return shcStatus
- }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(false))
+ }, deployment.GetTimeout(), ShortPollInterval).Should(gomega.Equal(false))
}
// LicenseManagerReady verify LM is in ready status and does not flip flop
@@ -478,7 +453,7 @@ func VerifyRollingRestartFinished(ctx context.Context, deployment *Deployment) {
rollingRestartStatus := CheckRollingRestartStatus(ctx, deployment)
logf.Log.Info("Rolling Restart Status", "Active", rollingRestartStatus)
return rollingRestartStatus
- }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(true))
+ }, deployment.GetTimeout(), ShortPollInterval).Should(gomega.Equal(true))
}
// VerifyConfOnPod Verify give conf and value on config file on pod
@@ -511,7 +486,7 @@ func VerifySearchHeadClusterPhase(ctx context.Context, deployment *Deployment, t
DumpGetPods(testenvInstance.GetName())
return shc.Status.Phase
- }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseScalingUp))
+ }, deployment.GetTimeout(), ShortPollInterval).Should(gomega.Equal(enterpriseApi.PhaseScalingUp))
}
// VerifyIndexerClusterPhase verify the phase of idxc matches the given phase
@@ -526,7 +501,7 @@ func VerifyIndexerClusterPhase(ctx context.Context, deployment *Deployment, test
DumpGetPods(testenvInstance.GetName())
return idxc.Status.Phase
- }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase))
+ }, deployment.GetTimeout(), ShortPollInterval).Should(gomega.Equal(phase))
}
// VerifyStandalonePhase verify the phase of Standalone CR
@@ -541,7 +516,7 @@ func VerifyStandalonePhase(ctx context.Context, deployment *Deployment, testenvI
DumpGetPods(testenvInstance.GetName())
return standalone.Status.Phase
- }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase))
+ }, deployment.GetTimeout(), ShortPollInterval).Should(gomega.Equal(phase))
}
// VerifyMonitoringConsolePhase verify the phase of Monitoring Console CR
@@ -556,7 +531,7 @@ func VerifyMonitoringConsolePhase(ctx context.Context, deployment *Deployment, t
DumpGetPods(testenvInstance.GetName())
return mc.Status.Phase
- }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase))
+ }, deployment.GetTimeout(), ShortPollInterval).Should(gomega.Equal(phase))
}
// GetResourceVersion get resource version id
@@ -651,7 +626,7 @@ func VerifyCustomResourceVersionChanged(ctx context.Context, deployment *Deploym
DumpGetPods(testenvInstance.GetName())
return newResourceVersion
- }, deployment.GetTimeout(), PollInterval).ShouldNot(gomega.Equal(resourceVersion))
+ }, deployment.GetTimeout(), ShortPollInterval).ShouldNot(gomega.Equal(resourceVersion))
}
// VerifyCPULimits verifies value of CPU limits is as expected
@@ -736,7 +711,7 @@ func VerifyClusterManagerPhase(ctx context.Context, deployment *Deployment, test
// Test ClusterManager Phase to see if its ready
return cm.Status.Phase
- }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase))
+ }, deployment.GetTimeout(), ShortPollInterval).Should(gomega.Equal(phase))
}
// VerifyClusterMasterPhase verify phase of cluster manager
@@ -752,7 +727,7 @@ func VerifyClusterMasterPhase(ctx context.Context, deployment *Deployment, teste
// Test ClusterManager Phase to see if its ready
return cm.Status.Phase
- }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase))
+ }, deployment.GetTimeout(), ShortPollInterval).Should(gomega.Equal(phase))
}
// VerifySecretsOnPods Check whether the secret object info is mounted on given pods
@@ -1332,3 +1307,214 @@ func TriggerTelemetrySubmission(ctx context.Context, deployment *Deployment) {
logf.Log.Info("Successfully updated telemetry ConfigMap", "key", testKey, "value", jsonValue)
}
+
+// WaitForEvent polls until a Kubernetes event with the given reason is recorded against the named object, instead of relying on a fixed sleep
+func WaitForEvent(ctx context.Context, deployment *Deployment, namespace, crName, eventReason string, timeout time.Duration) error {
+ return wait.PollUntilContextTimeout(ctx, 1*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
+ eventList := &corev1.EventList{}
+ err := deployment.testenv.GetKubeClient().List(ctx, eventList, client.InNamespace(namespace))
+ if err != nil {
+ return false, nil
+ }
+
+ for _, event := range eventList.Items {
+ if event.InvolvedObject.Name == crName && event.Reason == eventReason {
+ return true, nil
+ }
+ }
+ return false, nil
+ })
+}
+
+// WaitForClusterManagerPhase waits for ClusterManager to reach expected phase
+func WaitForClusterManagerPhase(ctx context.Context, deployment *Deployment, namespace, crName string, expectedPhase enterpriseApi.Phase, timeout time.Duration) error {
+ return wait.PollUntilContextTimeout(ctx, 1*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
+ cr := &enterpriseApi.ClusterManager{}
+ err := deployment.testenv.GetKubeClient().Get(ctx, client.ObjectKey{Name: crName, Namespace: namespace}, cr)
+ if err != nil {
+ return false, nil
+ }
+ return cr.Status.Phase == expectedPhase, nil
+ })
+}
+
+// WaitForSearchHeadClusterPhase waits for SearchHeadCluster to reach expected phase
+func WaitForSearchHeadClusterPhase(ctx context.Context, deployment *Deployment, namespace, crName string, expectedPhase enterpriseApi.Phase, timeout time.Duration) error {
+ return wait.PollUntilContextTimeout(ctx, 1*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
+ cr := &enterpriseApi.SearchHeadCluster{}
+ err := deployment.testenv.GetKubeClient().Get(ctx, client.ObjectKey{Name: crName, Namespace: namespace}, cr)
+ if err != nil {
+ return false, nil
+ }
+ return cr.Status.Phase == expectedPhase, nil
+ })
+}
+
+// WaitForMonitoringConsolePhase waits for MonitoringConsole to reach expected phase
+func WaitForMonitoringConsolePhase(ctx context.Context, deployment *Deployment, namespace, crName string, expectedPhase enterpriseApi.Phase, timeout time.Duration) error {
+ return wait.PollUntilContextTimeout(ctx, 1*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
+ cr := &enterpriseApi.MonitoringConsole{}
+ err := deployment.testenv.GetKubeClient().Get(ctx, client.ObjectKey{Name: crName, Namespace: namespace}, cr)
+ if err != nil {
+ return false, nil
+ }
+ return cr.Status.Phase == expectedPhase, nil
+ })
+}
+
+// WaitForClusterInitialized waits for ClusterInitialized event on IndexerCluster
+func WaitForClusterInitialized(ctx context.Context, deployment *Deployment, namespace, crName string, timeout time.Duration) error {
+ return WaitForEvent(ctx, deployment, namespace, crName, "ClusterInitialized", timeout)
+}
+
+// WaitForScaledUp waits for ScaledUp event on a CR (Standalone, IndexerCluster, SearchHeadCluster)
+func WaitForScaledUp(ctx context.Context, deployment *Deployment, namespace, crName string, timeout time.Duration) error {
+ return WaitForEvent(ctx, deployment, namespace, crName, "ScaledUp", timeout)
+}
+
+// WaitForScaledDown waits for ScaledDown event on a CR (Standalone, IndexerCluster, SearchHeadCluster)
+func WaitForScaledDown(ctx context.Context, deployment *Deployment, namespace, crName string, timeout time.Duration) error {
+ return WaitForEvent(ctx, deployment, namespace, crName, "ScaledDown", timeout)
+}
+
+// WaitForPasswordSyncCompleted waits for PasswordSyncCompleted event on IndexerCluster or SearchHeadCluster
+func WaitForPasswordSyncCompleted(ctx context.Context, deployment *Deployment, namespace, crName string, timeout time.Duration) error {
+ return WaitForEvent(ctx, deployment, namespace, crName, "PasswordSyncCompleted", timeout)
+}
+
+// WaitForPodsInMCConfigMap waits until every pod is present in (or, when expected is false, absent from) the MC ConfigMap, instead of using time.Sleep
+func WaitForPodsInMCConfigMap(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv, pods []string, key string, mcName string, expected bool, timeout time.Duration) error {
+ return wait.PollUntilContextTimeout(ctx, PollInterval, timeout, true, func(ctx context.Context) (bool, error) {
+ mcConfigMap, err := GetMCConfigMap(ctx, deployment, testenvInstance.GetName(), mcName)
+ if err != nil {
+ return false, nil
+ }
+ for _, podName := range pods {
+ found := CheckPodNameInString(podName, mcConfigMap.Data[key])
+ if found != expected {
+ return false, nil
+ }
+ }
+ return true, nil
+ })
+}
+
+// WaitForPodsInMCConfigString waits until every pod (or its IP, when checkPodIP is true) is present in (or absent from, per expected) the MC config string, instead of using time.Sleep
+func WaitForPodsInMCConfigString(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv, pods []string, mcName string, expected bool, checkPodIP bool, timeout time.Duration) error {
+ return wait.PollUntilContextTimeout(ctx, PollInterval, timeout, true, func(ctx context.Context) (bool, error) {
+ for _, podName := range pods {
+ var found bool
+ if checkPodIP {
+ podIP := GetPodIP(testenvInstance.GetName(), podName)
+ found = CheckPodNameOnMC(testenvInstance.GetName(), mcName, podIP)
+ } else {
+ found = CheckPodNameOnMC(testenvInstance.GetName(), mcName, podName)
+ }
+ if found != expected {
+ return false, nil
+ }
+ }
+ return true, nil
+ })
+}
+
+// WaitForAppPhase waits for an app to reach a specific phase on a CR
+func WaitForAppPhase(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv, crName string, crKind string, appSourceName string, appName string, expectedPhase enterpriseApi.AppPhaseType, timeout time.Duration) error {
+ return wait.PollUntilContextTimeout(ctx, PollInterval, timeout, true, func(ctx context.Context) (bool, error) {
+ appDeploymentInfo, err := GetAppDeploymentInfo(ctx, deployment, testenvInstance, crName, crKind, appSourceName, appName)
+ if err != nil {
+ return false, nil
+ }
+ return appDeploymentInfo.PhaseInfo.Phase == expectedPhase, nil
+ })
+}
+
+// WaitForAllAppsPhase waits for all apps in a list to reach a specific phase
+func WaitForAllAppsPhase(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv, crName string, crKind string, appSourceName string, appList []string, expectedPhase enterpriseApi.AppPhaseType, timeout time.Duration) error {
+ return wait.PollUntilContextTimeout(ctx, PollInterval, timeout, true, func(ctx context.Context) (bool, error) {
+ for _, appName := range appList {
+ appDeploymentInfo, err := GetAppDeploymentInfo(ctx, deployment, testenvInstance, crName, crKind, appSourceName, appName)
+ if err != nil {
+ return false, nil
+ }
+ if appDeploymentInfo.PhaseInfo.Phase != expectedPhase {
+ return false, nil
+ }
+ }
+ return true, nil
+ })
+}
+
+// WaitForStandalonePhase waits for Standalone to reach expected phase
+func WaitForStandalonePhase(ctx context.Context, deployment *Deployment, namespace, crName string, expectedPhase enterpriseApi.Phase, timeout time.Duration) error {
+ return wait.PollUntilContextTimeout(ctx, PollInterval, timeout, true, func(ctx context.Context) (bool, error) {
+ cr := &enterpriseApi.Standalone{}
+ err := deployment.testenv.GetKubeClient().Get(ctx, client.ObjectKey{Name: crName, Namespace: namespace}, cr)
+ if err != nil {
+ return false, nil
+ }
+ return cr.Status.Phase == expectedPhase, nil
+ })
+}
+
+// WaitForLicenseManagerPhase waits for LicenseManager to reach expected phase
+func WaitForLicenseManagerPhase(ctx context.Context, deployment *Deployment, namespace, crName string, expectedPhase enterpriseApi.Phase, timeout time.Duration) error {
+ return wait.PollUntilContextTimeout(ctx, PollInterval, timeout, true, func(ctx context.Context) (bool, error) {
+ cr := &enterpriseApi.LicenseManager{}
+ err := deployment.testenv.GetKubeClient().Get(ctx, client.ObjectKey{Name: crName, Namespace: namespace}, cr)
+ if err != nil {
+ return false, nil
+ }
+ return cr.Status.Phase == expectedPhase, nil
+ })
+}
+
+// WaitForLicenseMasterPhase waits for LicenseMaster to reach expected phase
+func WaitForLicenseMasterPhase(ctx context.Context, deployment *Deployment, namespace, crName string, expectedPhase enterpriseApi.Phase, timeout time.Duration) error {
+ return wait.PollUntilContextTimeout(ctx, PollInterval, timeout, true, func(ctx context.Context) (bool, error) {
+ cr := &enterpriseApiV3.LicenseMaster{}
+ err := deployment.testenv.GetKubeClient().Get(ctx, client.ObjectKey{Name: crName, Namespace: namespace}, cr)
+ if err != nil {
+ return false, nil
+ }
+ return cr.Status.Phase == expectedPhase, nil
+ })
+}
+
+// WaitForIndexerClusterPhase waits for IndexerCluster to reach expected phase
+func WaitForIndexerClusterPhase(ctx context.Context, deployment *Deployment, namespace, crName string, expectedPhase enterpriseApi.Phase, timeout time.Duration) error {
+ return wait.PollUntilContextTimeout(ctx, PollInterval, timeout, true, func(ctx context.Context) (bool, error) {
+ cr := &enterpriseApi.IndexerCluster{}
+ err := deployment.testenv.GetKubeClient().Get(ctx, client.ObjectKey{Name: crName, Namespace: namespace}, cr)
+ if err != nil {
+ return false, nil
+ }
+ return cr.Status.Phase == expectedPhase, nil
+ })
+}
+
+// WaitForSearchResultsNonEmpty waits for search results to return a non-empty "result" field
+func WaitForSearchResultsNonEmpty(ctx context.Context, deployment *Deployment, podName string, searchString string, timeout time.Duration) error {
+ return wait.PollUntilContextTimeout(ctx, PollInterval, timeout, true, func(ctx context.Context) (bool, error) {
+ searchResultsResp, err := PerformSearchSync(ctx, podName, searchString, deployment)
+ if err != nil {
+ return false, nil
+ }
+ var searchResults map[string]interface{}
+ if jsonErr := json.Unmarshal([]byte(searchResultsResp), &searchResults); jsonErr != nil {
+ return false, nil
+ }
+ return searchResults["result"] != nil, nil
+ })
+}
+
+// WaitForPodExecSuccess retries pod exec command until success or timeout
+func WaitForPodExecSuccess(ctx context.Context, deployment *Deployment, podName string, command []string, stdin string, timeout time.Duration) (string, error) {
+ var stdout string
+ err := wait.PollUntilContextTimeout(ctx, PollInterval, timeout, true, func(ctx context.Context) (bool, error) {
+ var err error
+ stdout, _, err = deployment.PodExecCommand(ctx, podName, command, stdin, false)
+ return err == nil, nil
+ })
+ return stdout, err
+}