diff --git a/.biased_lang_exclude b/.biased_lang_exclude
index 32a3a3699..80b251425 100644
--- a/.biased_lang_exclude
+++ b/.biased_lang_exclude
@@ -19,6 +19,7 @@ config/examples/licensemaster/default.yaml
config/rbac/licensemaster_editor_role.yaml
config/rbac/licensemaster_viewer_role.yaml
config/samples/enterprise_v3_licensemaster.yaml
+config/webhook/manifests.yaml
tools/make_bundle.sh
config/samples/kustomization.yaml
config/manifests/bases/splunk-operator.clusterserviceversion.yaml
diff --git a/.env b/.env
index feb155eea..28360f2d8 100644
--- a/.env
+++ b/.env
@@ -1,6 +1,6 @@
-OPERATOR_SDK_VERSION=v1.39.0
-REVIEWERS=vivekr-splunk,rlieberman-splunk,patrykw-splunk,Igor-splunk,kasiakoziol,kubabuczak
-GO_VERSION=1.25.5
+OPERATOR_SDK_VERSION=v1.42.0
+REVIEWERS=vivekr-splunk,rlieberman-splunk,patrykw-splunk,Igor-splunk,kasiakoziol,kubabuczak,gabrielm-splunk,minjieqiu,qingw-splunk
+GO_VERSION=1.25.7
AWSCLI_URL=https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.8.6.zip
KUBECTL_VERSION=v1.29.1
AZ_CLI_VERSION=2.79.0
diff --git a/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml b/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml
index de972ba59..4369ac1eb 100644
--- a/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml
+++ b/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml
@@ -49,6 +49,7 @@ jobs:
- name: Run Unit Tests
run: make test
- name: Run Code Coverage
+ if: ${{ secrets.COVERALLS_TOKEN != '' }}
run: goveralls -coverprofile=coverage.out -service=circle-ci -repotoken ${{ secrets.COVERALLS_TOKEN }}
- name: Upload Coverage artifacts
uses: actions/upload-artifact@v4.4.0
@@ -121,6 +122,7 @@ jobs:
appframeworksS1,
managersecret,
managermc,
+ indingsep,
]
runs-on: ubuntu-latest
env:
@@ -146,6 +148,8 @@ jobs:
DEPLOYMENT_TYPE: ""
ARM64: "true"
GRAVITON_TESTING: "true"
+ AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }}
+ AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }}
steps:
- name: Chekcout code
uses: actions/checkout@v2
diff --git a/.github/workflows/arm-AL2023-int-test-workflow.yml b/.github/workflows/arm-AL2023-int-test-workflow.yml
index b0bd87391..2697ff1af 100644
--- a/.github/workflows/arm-AL2023-int-test-workflow.yml
+++ b/.github/workflows/arm-AL2023-int-test-workflow.yml
@@ -68,6 +68,7 @@ jobs:
managercrcrud,
licensemanager,
managerdeletecr,
+ indingsep,
]
runs-on: ubuntu-latest
needs: build-operator-image-arm-al2023
@@ -93,6 +94,8 @@ jobs:
DEPLOYMENT_TYPE: ""
ARM64: "true"
GRAVITON_TESTING: "true"
+ AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }}
+ AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }}
steps:
- name: Set Test Cluster Nodes and Parallel Runs
run: >-
diff --git a/.github/workflows/arm-RHEL-build-test-push-workflow.yml b/.github/workflows/arm-RHEL-build-test-push-workflow.yml
index 947681124..f860f1c43 100644
--- a/.github/workflows/arm-RHEL-build-test-push-workflow.yml
+++ b/.github/workflows/arm-RHEL-build-test-push-workflow.yml
@@ -69,6 +69,7 @@ jobs:
managercrcrud,
licensemanager,
managerdeletecr,
+ indingsep,
]
runs-on: ubuntu-latest
needs: build-operator-image-arm-rhel
@@ -94,6 +95,8 @@ jobs:
DEPLOYMENT_TYPE: ""
ARM64: "true"
GRAVITON_TESTING: "true"
+ AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }}
+ AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }}
steps:
- name: Set Test Cluster Nodes and Parallel Runs
run: >-
diff --git a/.github/workflows/arm-RHEL-int-test-workflow.yml b/.github/workflows/arm-RHEL-int-test-workflow.yml
index 4ba671c50..ff9baddcb 100644
--- a/.github/workflows/arm-RHEL-int-test-workflow.yml
+++ b/.github/workflows/arm-RHEL-int-test-workflow.yml
@@ -68,6 +68,7 @@ jobs:
managercrcrud,
licensemanager,
managerdeletecr,
+ indingsep,
]
runs-on: ubuntu-latest
needs: build-operator-image-arm-rhel
@@ -93,6 +94,8 @@ jobs:
DEPLOYMENT_TYPE: ""
ARM64: "true"
GRAVITON_TESTING: "true"
+ AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }}
+ AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }}
steps:
- name: Set Test Cluster Nodes and Parallel Runs
run: >-
diff --git a/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml b/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml
index cecd1539f..b27ac0edb 100644
--- a/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml
+++ b/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml
@@ -49,6 +49,7 @@ jobs:
- name: Run Unit Tests
run: make test
- name: Run Code Coverage
+ if: ${{ secrets.COVERALLS_TOKEN != '' }}
run: goveralls -coverprofile=coverage.out -service=circle-ci -repotoken ${{ secrets.COVERALLS_TOKEN }}
- name: Upload Coverage artifacts
uses: actions/upload-artifact@v4.4.0
@@ -121,6 +122,7 @@ jobs:
appframeworksS1,
managersecret,
managermc,
+ indingsep,
]
runs-on: ubuntu-latest
env:
@@ -146,6 +148,8 @@ jobs:
DEPLOYMENT_TYPE: ""
ARM64: "true"
GRAVITON_TESTING: "true"
+ AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }}
+ AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }}
steps:
- name: Chekcout code
uses: actions/checkout@v2
diff --git a/.github/workflows/arm-Ubuntu-int-test-workflow.yml b/.github/workflows/arm-Ubuntu-int-test-workflow.yml
index f4a1ce18c..fe0a69e91 100644
--- a/.github/workflows/arm-Ubuntu-int-test-workflow.yml
+++ b/.github/workflows/arm-Ubuntu-int-test-workflow.yml
@@ -68,6 +68,7 @@ jobs:
managercrcrud,
licensemanager,
managerdeletecr,
+ indingsep,
]
runs-on: ubuntu-latest
needs: build-operator-image-arm-ubuntu
@@ -93,6 +94,8 @@ jobs:
DEPLOYMENT_TYPE: ""
ARM64: "true"
GRAVITON_TESTING: "true"
+ AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }}
+ AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }}
steps:
- name: Set Test Cluster Nodes and Parallel Runs
run: >-
diff --git a/.github/workflows/build-test-push-workflow.yml b/.github/workflows/build-test-push-workflow.yml
index f392cd98a..8cf8aeab6 100644
--- a/.github/workflows/build-test-push-workflow.yml
+++ b/.github/workflows/build-test-push-workflow.yml
@@ -5,11 +5,17 @@ permissions:
id-token: write
pull-requests: write
on:
- pull_request: {}
+ pull_request:
+ paths-ignore:
+ - 'docs/**'
+ - '*.md'
push:
branches:
- main
- develop
+ paths-ignore:
+ - 'docs/**'
+ - '*.md'
jobs:
check-formating:
runs-on: ubuntu-latest
@@ -23,12 +29,17 @@ jobs:
with:
go-version: ${{ steps.dotenv.outputs.GO_VERSION }}
- name: Check Source formatting
- run: make fmt && if [[ $? -ne 0 ]]; then false; fi
+ run: |
+ UNFORMATTED=$(gofmt -l .)
+ if [ -n "$UNFORMATTED" ]; then
+ echo "These files are not formatted:"
+ echo "$UNFORMATTED"
+ exit 1
+ fi
- name: Lint source code
run: make vet && if [[ $? -ne 0 ]]; then false; fi
unit-tests:
runs-on: ubuntu-latest
- needs: check-formating
steps:
- uses: actions/checkout@v2
- name: Dotenv Action
@@ -50,7 +61,8 @@ jobs:
run: |
make test
- name: Run Code Coverage
- run: goveralls -coverprofile=coverage.out -service=circle-ci -repotoken ${{ secrets.COVERALLS_TOKEN }}
+ if: ${{ secrets.COVERALLS_TOKEN != '' }}
+ run: goveralls -shallow -coverprofile=coverage.out -service=circle-ci -repotoken ${{ secrets.COVERALLS_TOKEN }}
- name: Upload Coverage artifacts
uses: actions/upload-artifact@v4.4.0
with:
@@ -58,7 +70,7 @@ jobs:
path: coverage.out
build-operator-image:
runs-on: ubuntu-latest
- needs: unit-tests
+ needs: [check-formating, unit-tests]
env:
SPLUNK_ENTERPRISE_IMAGE: ${{ secrets.SPLUNK_ENTERPRISE_IMAGE }}
SPLUNK_OPERATOR_IMAGE_NAME: splunk/splunk-operator
@@ -174,6 +186,7 @@ jobs:
managerappframeworkm4,
managersecret,
managermc,
+ indingsep,
]
runs-on: ubuntu-latest
env:
@@ -197,6 +210,8 @@ jobs:
EKS_SSH_PUBLIC_KEY: ${{ secrets.EKS_SSH_PUBLIC_KEY }}
CLUSTER_WIDE: "true"
DEPLOYMENT_TYPE: ""
+ AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }}
+ AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }}
steps:
- name: Chekcout code
uses: actions/checkout@v2
diff --git a/.github/workflows/cla-check.yml b/.github/workflows/cla-check.yml
index b84ca13b1..336080214 100644
--- a/.github/workflows/cla-check.yml
+++ b/.github/workflows/cla-check.yml
@@ -22,29 +22,33 @@ jobs:
PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
with:
path-to-signatures: "signatures/version1/cla.json"
- path-to-document: "https://github.com/splunk/cla-agreement/blob/main/CLA.md" # e.g. a CLA or a DCO document
+ path-to-document: "https://github.com/splunk/cla-agreement/blob/main/CLA.md"
branch: "main"
allowlist: dependabot[bot]
remote-organization-name: splunk
remote-repository-name: cla-agreement
+ custom-notsigned-prcomment: "
Thank you for your submission, we really appreciate it. Like many open-source projects, we ask that you sign our [Contribution License Agreement](${input.getPathToDocument()}) before we can accept your contribution. You can sign the CLA by just posting a Pull Request Comment with the exact sentence copied from below.
"
+ custom-allsigned-prcomment: "⏳ **CLA signed** — now checking Code of Conduct status..."
CodeOfConduct:
runs-on: ubuntu-latest
+ # CLA and COC jobs both edit the same PR comment to show signing status.
+ # Run sequentially to avoid race conditions when updating the comment.
+ needs: ContributorLicenseAgreement
steps:
- name: "COC Assistant"
- if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the Code of Conduct and I hereby accept the Terms') || github.event_name == 'pull_request_target'
+ if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the Code of Conduct and I hereby sign the COC') || github.event_name == 'pull_request_target'
uses: cla-assistant/github-action@v2.1.3-beta
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
with:
path-to-signatures: "signatures/version1/coc.json"
- path-to-document: "https://github.com/splunk/cla-agreement/blob/main/CODE_OF_CONDUCT.md" # e.g. a COC or a DCO document
+ path-to-document: "https://github.com/splunk/cla-agreement/blob/main/CODE_OF_CONDUCT.md"
branch: "main"
allowlist: dependabot[bot]
remote-organization-name: splunk
remote-repository-name: cla-agreement
- custom-pr-sign-comment: "I have read the Code of Conduct and I hereby accept the Terms"
- create-file-commit-message: "For example: Creating file for storing COC Signatures"
+ custom-pr-sign-comment: "I have read the Code of Conduct and I hereby sign the COC"
signed-commit-message: "$contributorName has signed the COC in #$pullRequestNo"
- custom-notsigned-prcomment: "All contributors have NOT signed the COC Document"
- custom-allsigned-prcomment: "****CLA Assistant Lite bot**** All contributors have signed the COC ✍️ ✅"
\ No newline at end of file
+ custom-notsigned-prcomment: "
🎉 **CLA signed — one more step to go!**
Please also accept our [Code of Conduct](${input.getPathToDocument()}) by posting a comment with the exact sentence copied from below. This helps us maintain a welcoming community.
"
+ custom-allsigned-prcomment: "All contributors have signed required documents ✍️ ✅"
\ No newline at end of file
diff --git a/.github/workflows/distroless-build-test-push-workflow.yml b/.github/workflows/distroless-build-test-push-workflow.yml
index ef652f5b9..0912a341d 100644
--- a/.github/workflows/distroless-build-test-push-workflow.yml
+++ b/.github/workflows/distroless-build-test-push-workflow.yml
@@ -5,11 +5,17 @@ permissions:
id-token: write
pull-requests: write
on:
- pull_request: {}
+ pull_request:
+ paths-ignore:
+ - 'docs/**'
+ - '*.md'
push:
branches:
- - main
- - develop
+ - main
+ - develop
+ paths-ignore:
+ - 'docs/**'
+ - '*.md'
jobs:
check-formating:
runs-on: ubuntu-latest
@@ -49,7 +55,8 @@ jobs:
- name: Run Unit Tests
run: make test
- name: Run Code Coverage
- run: goveralls -coverprofile=coverage.out -service=circle-ci -repotoken ${{ secrets.COVERALLS_TOKEN }}
+ if: ${{ secrets.COVERALLS_TOKEN != '' }}
+ run: goveralls -shallow -coverprofile=coverage.out -service=circle-ci -repotoken ${{ secrets.COVERALLS_TOKEN }}
- name: Upload Coverage artifacts
uses: actions/upload-artifact@v4.4.0
with:
@@ -174,6 +181,7 @@ jobs:
managerappframeworkm4,
managersecret,
managermc,
+ indingsep,
]
runs-on: ubuntu-latest
env:
@@ -197,6 +205,8 @@ jobs:
EKS_SSH_PUBLIC_KEY: ${{ secrets.EKS_SSH_PUBLIC_KEY }}
CLUSTER_WIDE: "true"
DEPLOYMENT_TYPE: ""
+ AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }}
+ AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }}
steps:
- name: Chekcout code
uses: actions/checkout@v2
diff --git a/.github/workflows/distroless-int-test-workflow.yml b/.github/workflows/distroless-int-test-workflow.yml
index 0dea5b263..6238aed14 100644
--- a/.github/workflows/distroless-int-test-workflow.yml
+++ b/.github/workflows/distroless-int-test-workflow.yml
@@ -9,6 +9,9 @@ on:
branches:
- develop
- main
+ paths-ignore:
+ - 'docs/**'
+ - '*.md'
jobs:
build-operator-image-distroless:
runs-on: ubuntu-latest
@@ -69,6 +72,7 @@ jobs:
managercrcrud,
licensemanager,
managerdeletecr,
+ indingsep,
]
runs-on: ubuntu-latest
needs: build-operator-image-distroless
@@ -92,6 +96,8 @@ jobs:
S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
CLUSTER_WIDE: "true"
DEPLOYMENT_TYPE: ""
+ AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }}
+ AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }}
steps:
- name: Set Test Cluster Nodes and Parallel Runs
run: >-
diff --git a/.github/workflows/helm-test-workflow.yml b/.github/workflows/helm-test-workflow.yml
index e0ed442a4..8f7549ab5 100644
--- a/.github/workflows/helm-test-workflow.yml
+++ b/.github/workflows/helm-test-workflow.yml
@@ -10,6 +10,9 @@ on:
- develop
- main
- feature**
+ paths-ignore:
+ - 'docs/**'
+ - '*.md'
workflow_dispatch:
jobs:
build-operator-image:
@@ -72,6 +75,8 @@ jobs:
HELM_REPO_PATH: "../../../../helm-chart"
INSTALL_OPERATOR: "true"
TEST_VPC_ENDPOINT_URL: ${{ secrets.TEST_VPC_ENDPOINT_URL }}
+ AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }}
+ AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }}
steps:
- uses: chrisdickinson/setup-yq@3d931309f27270ebbafd53f2daee773a82ea1822
- name: Checking YQ installation
@@ -113,8 +118,8 @@ jobs:
version: ${{ steps.dotenv.outputs.KUBECTL_VERSION }}
- name: Install kuttl
run: |
- sudo curl -LO https://github.com/kudobuilder/kuttl/releases/download/v0.12.0/kuttl_0.12.0_linux_x86_64.tar.gz
- sudo tar -xvzf kuttl_0.12.0_linux_x86_64.tar.gz
+ sudo curl -LO https://github.com/kudobuilder/kuttl/releases/download/v0.22.0/kuttl_0.22.0_linux_x86_64.tar.gz
+ sudo tar -xvzf kuttl_0.22.0_linux_x86_64.tar.gz
sudo chmod +x kubectl-kuttl
sudo mv kubectl-kuttl /usr/local/bin/kubectl-kuttl
- name: Install Python
diff --git a/.github/workflows/int-test-azure-workflow.yml b/.github/workflows/int-test-azure-workflow.yml
index b58a04959..8b4d0e9a8 100644
--- a/.github/workflows/int-test-azure-workflow.yml
+++ b/.github/workflows/int-test-azure-workflow.yml
@@ -8,6 +8,9 @@ on:
branches:
- develop
- main
+ paths-ignore:
+ - 'docs/**'
+ - '*.md'
jobs:
build-operator-image:
runs-on: ubuntu-latest
diff --git a/.github/workflows/int-test-gcp-workflow.yml b/.github/workflows/int-test-gcp-workflow.yml
index bae27e97e..1d3dce0ce 100644
--- a/.github/workflows/int-test-gcp-workflow.yml
+++ b/.github/workflows/int-test-gcp-workflow.yml
@@ -9,6 +9,9 @@ on:
branches:
- develop
- main
+ paths-ignore:
+ - 'docs/**'
+ - '*.md'
jobs:
build-operator-image:
runs-on: ubuntu-latest
@@ -66,7 +69,7 @@ jobs:
create-cluster-and-run-tests:
strategy:
matrix:
- test_focus:
+ test_focus:
- { order: 1, name: "c3_gcp_sanity" }
- { order: 2, name: "c3_mgr_gcp_sanity" }
- { order: 3, name: "m4_gcp_sanity" }
@@ -128,7 +131,7 @@ jobs:
uses: google-github-actions/auth@v1
with:
credentials_json: ${{ secrets.GCP_SERVICE_ACCOUNT_KEY }}
-
+
- name: Set up Cloud SDK
uses: google-github-actions/setup-gcloud@v1
with:
@@ -187,7 +190,7 @@ jobs:
uses: actions/setup-go@v2
with:
go-version: ${{ steps.dotenv.outputs.GO_VERSION }}
-
+
- name: Install Go Lint
run: |
go version
@@ -207,7 +210,7 @@ jobs:
username: _json_key
password: ${{ secrets.GCP_SERVICE_ACCOUNT_KEY }}
- - name: Pull Splunk Enterprise Image
+ - name: Pull Splunk Enterprise Image
run: docker pull ${{ env.SPLUNK_ENTERPRISE_IMAGE }}
- name: Pull Splunk Operator Image Locally
@@ -230,7 +233,7 @@ jobs:
with:
cluster_name: ${{ env.CLUSTER_NAME }}
location: ${{ env.GCP_ZONE }}
-
+
- name: Install Metrics Server
run: |
curl -LO https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
@@ -248,7 +251,7 @@ jobs:
- name: Verify kubectl Configuration
run: |
kubectl config current-context
-
+
- name: Apply StorageClass
run: |
kubectl apply -f test/gcp-storageclass.yaml
diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml
index fabc2b4fa..4d0ef5379 100644
--- a/.github/workflows/int-test-workflow.yml
+++ b/.github/workflows/int-test-workflow.yml
@@ -10,6 +10,9 @@ on:
- develop
- main
- feature**
+ paths-ignore:
+ - 'docs/**'
+ - '*.md'
jobs:
build-operator-image:
runs-on: ubuntu-latest
@@ -66,6 +69,7 @@ jobs:
managercrcrud,
licensemanager,
managerdeletecr,
+ indingsep,
]
runs-on: ubuntu-latest
needs: build-operator-image
@@ -88,6 +92,8 @@ jobs:
S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
CLUSTER_WIDE: "true"
DEPLOYMENT_TYPE: ""
+ AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }}
+ AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }}
steps:
- name: Set Test Cluster Nodes and Parallel Runs
run: >-
diff --git a/.github/workflows/kubectl-splunk-workflow.yml b/.github/workflows/kubectl-splunk-workflow.yml
index 70bc6fecf..8e61ecbcf 100644
--- a/.github/workflows/kubectl-splunk-workflow.yml
+++ b/.github/workflows/kubectl-splunk-workflow.yml
@@ -1,6 +1,6 @@
# .github/workflows/ci.yml
-name: Kubectl Splunk CI
+name: Kubectl Splunk CI
permissions:
contents: read
@@ -9,11 +9,11 @@ permissions:
on:
push:
- branches:
- - feature/CSPL-3152
+ branches:
+ - feature/CSPL-3152
pull_request:
- branches:
- - feature/CSPL-3152
+ branches:
+ - feature/CSPL-3152
jobs:
build-and-test:
diff --git a/.github/workflows/manual-int-test-workflow.yml b/.github/workflows/manual-int-test-workflow.yml
index dc6981e46..f5150b3ac 100644
--- a/.github/workflows/manual-int-test-workflow.yml
+++ b/.github/workflows/manual-int-test-workflow.yml
@@ -28,6 +28,7 @@ jobs:
managerscaling,
managercrcrud,
licensemanager,
+ indingsep,
]
runs-on: ubuntu-latest
env:
@@ -49,6 +50,8 @@ jobs:
PRIVATE_REGISTRY: ${{ secrets.ECR_REPOSITORY }}
S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
CLUSTER_WIDE: ${{ github.event.inputs.CLUSTER_WIDE }}
+ AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }}
+ AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }}
steps:
- name: Set Test Cluster Nodes and Parallel Runs
run: >-
diff --git a/.github/workflows/namespace-scope-int-workflow.yml b/.github/workflows/namespace-scope-int-workflow.yml
index 03cbc2b4f..b3576b1ee 100644
--- a/.github/workflows/namespace-scope-int-workflow.yml
+++ b/.github/workflows/namespace-scope-int-workflow.yml
@@ -24,6 +24,7 @@ jobs:
managerscaling,
managercrcrud,
licensemanager,
+ indingsep,
]
runs-on: ubuntu-latest
env:
@@ -44,6 +45,8 @@ jobs:
PRIVATE_REGISTRY: ${{ secrets.ECR_REPOSITORY }}
S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
CLUSTER_WIDE: "false"
+ AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }}
+ AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }}
steps:
- name: Set Test Cluster Nodes and Parallel Runs
run: >-
diff --git a/.github/workflows/nightly-int-test-workflow.yml b/.github/workflows/nightly-int-test-workflow.yml
index 769bac74a..6079816af 100644
--- a/.github/workflows/nightly-int-test-workflow.yml
+++ b/.github/workflows/nightly-int-test-workflow.yml
@@ -65,6 +65,7 @@ jobs:
managerscaling,
managercrcrud,
licensemanager,
+ indingsep,
]
runs-on: ubuntu-latest
needs: build-operator-image
@@ -86,6 +87,8 @@ jobs:
PRIVATE_REGISTRY: ${{ secrets.ECR_REPOSITORY }}
S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
CLUSTER_WIDE: "true"
+ AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }}
+ AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }}
steps:
- name: Set Test Cluster Nodes and Parallel Runs
run: >-
diff --git a/.github/workflows/pre-release-workflow.yml b/.github/workflows/pre-release-workflow.yml
index b5b48bacc..bdf1cc9b2 100644
--- a/.github/workflows/pre-release-workflow.yml
+++ b/.github/workflows/pre-release-workflow.yml
@@ -249,4 +249,4 @@ jobs:
body: |
### Automated Pull Request for Splunk Operator Release ${{ github.event.inputs.release_version }}
* Changes added to docs/ChangeLog-NEW.md. Please filter and update ChangeLog.md
- * Delete ChangeLog-New.md
\ No newline at end of file
+ * Delete ChangeLog-New.md
diff --git a/.github/workflows/prodsec-workflow.yml b/.github/workflows/prodsec-workflow.yml
index 54942b0b2..777075246 100644
--- a/.github/workflows/prodsec-workflow.yml
+++ b/.github/workflows/prodsec-workflow.yml
@@ -4,11 +4,17 @@ permissions:
packages: write
pull-requests: write
on:
- pull_request: {}
+ pull_request:
+ paths-ignore:
+ - 'docs/**'
+ - '*.md'
push:
branches:
- main
- develop
+ paths-ignore:
+ - 'docs/**'
+ - '*.md'
jobs:
semgrep:
name: Semgrep Scanner
@@ -32,7 +38,8 @@ jobs:
- name: Dotenv Action
id: dotenv
uses: falti/dotenv-action@d4d12eaa0e1dd06d5bdc3d7af3bf4c8c93cb5359
- - name: Run FOSSA Test
+ - name: Run FOSSA Test
+ if: ${{ secrets.FOSSA_API_TOKEN != '' }}
uses: fossas/fossa-action@main
with:
api-key: ${{secrets.FOSSA_API_TOKEN}}
\ No newline at end of file
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 000000000..261d12168
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,116 @@
+version: "2"
+run:
+ allow-parallel-runners: true
+ timeout: 10m
+linters:
+ default: none
+ enable:
+ - copyloopvar
+ - ginkgolinter
+ - govet
+ - ineffassign
+ - misspell
+ - nakedret
+ - staticcheck
+ - unconvert
+ - unused
+ settings:
+ gocyclo:
+ min-complexity: 50
+ goconst:
+ min-len: 3
+ min-occurrences: 10
+ exclusions:
+ generated: lax
+ rules:
+ - linters:
+ - goconst
+ path: _test\.go$
+ - linters:
+ - gocyclo
+ path: _test\.go$
+ - linters:
+ - unparam
+ path: _test\.go$
+ - linters:
+ - prealloc
+ path: _test\.go$
+ - linters:
+ - ineffassign
+ path: _test\.go$
+ - linters:
+ - ginkgolinter
+ path: _test\.go$
+ - linters:
+ - misspell
+ path: _test\.go$
+ - linters:
+ - unconvert
+ path: _test\.go$
+ - linters:
+ - goconst
+ - gocyclo
+ - unparam
+ - prealloc
+ - ineffassign
+ - unused
+ - ginkgolinter
+ - staticcheck
+ - misspell
+ - unconvert
+ path: test/
+ # Exclude ST1019 (duplicate imports) - project uses aliased imports intentionally
+ - linters:
+ - staticcheck
+ text: "ST1019"
+ # Exclude SA4022 (address of variable cannot be nil) - false positive for spec checks
+ - linters:
+ - staticcheck
+ text: "SA4022"
+ # Exclude ST1023 (should omit type from declaration) - style preference
+ - linters:
+ - staticcheck
+ text: "ST1023"
+ # Exclude QF1001/QF1003/QF1008/QF1011 - quickfix suggestions, not errors
+ - linters:
+ - staticcheck
+ text: "QF100"
+ - linters:
+ - staticcheck
+ text: "QF1011"
+ # Exclude S1021 (should merge variable declaration) - style preference
+ - linters:
+ - staticcheck
+ text: "S1021"
+ # Exclude ST1005 (error strings should not be capitalized) - existing codebase style
+ - linters:
+ - staticcheck
+ text: "ST1005"
+ # Exclude SA1019 (deprecated) - will be addressed separately
+ - linters:
+ - staticcheck
+ text: "SA1019"
+ # Exclude SA4006 (value never used) in test files
+ - linters:
+ - staticcheck
+ text: "SA4006"
+ path: _test\.go$
+ # Exclude SA1029 (context key type) in test files
+ - linters:
+ - staticcheck
+ text: "SA1029"
+ path: _test\.go$
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/AGENTS.md b/AGENTS.md
new file mode 100644
index 000000000..e2ba5781f
--- /dev/null
+++ b/AGENTS.md
@@ -0,0 +1,241 @@
+# Splunk Operator - AI Agent Guide
+
+This guide helps AI coding assistants understand the Splunk Operator project structure, development workflow, and common operations.
+
+## Project Overview
+
+The Splunk Operator is a Kubernetes operator that manages Splunk Enterprise deployments. It is built using:
+- **Language**: Go (see GO_VERSION in .env)
+- **Framework**: Kubernetes Operator SDK with controller-runtime
+- **Test Framework**: Ginkgo/Gomega
+- **CRD API Versions**: v1, v1alpha2, v1alpha3, v1beta1, v2, v3, v4
+
+## Repository Structure
+
+```
+├── api/ # Custom Resource Definitions (CRDs) for all API versions
+│ ├── v4/ # Current stable API version
+│ └── v3/ # Previous API version
+├── cmd/ # Main entry point for the operator
+├── config/ # Kubernetes manifests and configuration
+│ ├── crd/ # CRD base files
+│ ├── samples/ # Example CR manifests
+│ ├── default/ # Default kustomize configurations
+│ └── rbac/ # RBAC configurations
+├── docs/ # User-facing documentation
+├── helm-chart/ # Helm charts for operator and enterprise
+├── internal/ # Internal controller logic
+├── kuttl/ # KUTTL test scenarios
+├── pkg/ # Core business logic
+│ ├── splunk/
+│ │ ├── common/ # Common utilities
+│ │ ├── enterprise/ # Enterprise-specific logic
+│ │ ├── client/ # Splunk API client
+│ │ └── util/ # Utility functions
+├── test/ # Integration tests
+│ ├── testenv/ # Test environment utilities
+│ └── */ # Test suites by feature
+└── tools/ # Helper scripts and utilities
+```
+
+## Common Makefile Commands
+
+### Development Commands
+
+```bash
+# Display all available make targets with descriptions
+make help
+
+# Format code
+make fmt
+
+# Generate manifests (CRDs, RBAC, webhooks)
+make manifests
+
+# Generate DeepCopy methods
+make generate
+
+# Build the operator binary
+make build
+
+# Run unit tests
+make test
+
+# Build multi-platform images with buildx
+make docker-buildx IMG=<image> PLATFORMS=linux/amd64,linux/arm64
+```
+
+### Deployment Commands
+
+```bash
+# Install CRDs into cluster
+make install
+
+# Uninstall CRDs from cluster
+make uninstall
+
+# Deploy operator to cluster
+make deploy IMG=<image> NAMESPACE=<namespace> ENVIRONMENT=<environment>
+
+# Undeploy operator from cluster
+make undeploy
+```
+
+### Documentation Commands
+
+```bash
+# Preview documentation locally (requires Ruby and bundler)
+make docs-preview
+# Access at http://localhost:4000/splunk-operator
+```
+
+## Development Workflow
+
+### 1. Making Code Changes
+
+When modifying the operator code, follow this workflow:
+
+```bash
+# 1. Create a feature branch from develop
+git checkout -b feature/your-feature develop
+
+# 2. Make your changes to the codebase
+# - API changes: api/v4/*.go
+# - Controller logic: internal/controller/*.go
+# - Business logic: pkg/splunk/**/*.go
+
+# 3. If you modified API types, regenerate code
+make manifests generate
+
+# 4. Format and vet your code
+make fmt vet
+
+# 5. Run unit tests
+make test
+
+# 6. Build the operator
+make build
+```
+
+### 2. Testing Changes
+
+#### Unit Tests
+
+Unit tests are located alongside source files and use Ginkgo/Gomega:
+
+```bash
+# Run all unit tests with coverage
+make test
+
+# Run specific test packages directly
+# (see ENVTEST_K8S_VERSION in Makefile)
+KUBEBUILDER_ASSETS="$(setup-envtest use ${ENVTEST_K8S_VERSION} -p path)" \
+ ginkgo -v ./pkg/splunk/common
+```
+
+Test coverage includes:
+- `pkg/splunk/common` - Common utilities
+- `pkg/splunk/enterprise` - Enterprise logic
+- `pkg/splunk/client` - API client
+- `pkg/splunk/util` - Utilities
+- `internal/controller` - Controller reconciliation logic
+
+#### Integration Tests
+
+**Integration Test Structure:**
+- Each test suite has its own directory under `test/`
+- Suite file: `*_suite_test.go` - Creates TestEnv (namespace)
+- Spec files: `*_test.go` - Contains test cases (test case Contexts with It blocks)
+- Test utilities: `test/testenv/` - Helper functions for deployments
+
+**Test Categories:**
+- `test/smoke/` - Basic smoke tests
+- `test/licensemanager/` - License manager tests
+- `test/monitoring_console/` - Monitoring console tests
+- `test/appframework_aws/` - App Framework with AWS S3
+- `test/appframework_az/` - App Framework with Azure Blob
+- `test/appframework_gcp/` - App Framework with GCP Storage
+- `test/smartstore/` - SmartStore functionality
+- `test/secret/` - Secret management
+- `test/custom_resource_crud/` - CR CRUD operations
+
+#### KUTTL Tests
+
+KUTTL provides declarative end-to-end testing:
+
+```bash
+# KUTTL test scenarios are in kuttl/tests/
+# Run with kubectl-kuttl (if installed)
+kubectl kuttl test --config kuttl/kuttl-test-kind.yaml
+```
+
+### 3. Documentation Updates
+
+When making changes that affect users:
+
+```bash
+# 1. Update relevant documentation in docs/
+# - GettingStarted.md - Installation and basic usage
+# - Examples.md - Code examples
+# - CustomResources.md - CR specifications
+# - AppFramework.md - App Framework details
+# - SmartStore.md - SmartStore configuration
+
+# 2. Preview documentation locally
+make docs-preview
+
+# 3. Update CONTRIBUTING.md if workflow changes
+```
+
+## Environment Variables
+
+Key environment variables used in development:
+
+```bash
+# Operator configuration
+NAMESPACE=splunk-operator # Target namespace
+WATCH_NAMESPACE="" # Watch all namespaces (cluster-wide)
+ENVIRONMENT=default # Deployment environment
+
+# Splunk configuration
+SPLUNK_ENTERPRISE_IMAGE=(See SPLUNK_ENTERPRISE_RELEASE_IMAGE in .env) # Splunk Enterprise image
+SPLUNK_GENERAL_TERMS="" # SGT acceptance (required)
+
+# Testing
+SPLUNK_OPERATOR_IMAGE=splunk/splunk-operator:latest
+CLUSTER_PROVIDER=kind # kind, eks, azure, gcp
+PRIVATE_REGISTRY=localhost:5000
+
+# Cloud provider credentials (for integration tests)
+TEST_S3_ACCESS_KEY_ID=...
+TEST_S3_SECRET_ACCESS_KEY=...
+STORAGE_ACCOUNT=... # Azure
+STORAGE_ACCOUNT_KEY=... # Azure
+GCP_SERVICE_ACCOUNT_KEY=... # GCP
+```
+
+## Debugging Tips
+
+### Local Development
+
+```bash
+# Watch CRDs being reconciled
+kubectl get pods -n splunk-operator -w
+
+# Check operator logs
+kubectl logs -n splunk-operator deployment/splunk-operator-controller-manager -f
+
+# Describe a Custom Resource
+kubectl describe <kind> <name> -n <namespace>
+```
+
+### Common Issues
+
+1. **CRD not found**: Run `make install` to install CRDs
+2. **Permission errors**: Check RBAC with `kubectl auth can-i --list`
+3. **Image pull errors**: Verify `IMG` variable and registry access
+
+## Additional Resources
+- [Operator SDK Documentation](https://sdk.operatorframework.io/)
+- [Kubernetes API Reference](https://kubernetes.io/docs/reference/)
+- [Splunk Enterprise Documentation](https://help.splunk.com/en)
diff --git a/Dockerfile b/Dockerfile
index d6258cb44..b039d17e4 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,10 +2,10 @@
ARG PLATFORMS=linux/amd64,linux/arm64
ARG BASE_IMAGE=registry.access.redhat.com/ubi8/ubi-minimal
-ARG BASE_IMAGE_VERSION=8.10-1761032271
+ARG BASE_IMAGE_VERSION=8.10-1770223153
# Build the manager binary
-FROM golang:1.25.5 AS builder
+FROM golang:1.25.7 AS builder
WORKDIR /workspace
diff --git a/Dockerfile.distroless b/Dockerfile.distroless
index ae073445b..37121e4a6 100644
--- a/Dockerfile.distroless
+++ b/Dockerfile.distroless
@@ -1,5 +1,5 @@
# Build the manager binary
-FROM golang:1.25.5 AS builder
+FROM golang:1.25.7 AS builder
WORKDIR /workspace
# Copy the Go Modules manifests
diff --git a/Makefile b/Makefile
index d5f06bdd1..170ba70a6 100644
--- a/Makefile
+++ b/Makefile
@@ -59,7 +59,9 @@ BUNDLE_IMG ?= ${IMAGE_TAG_BASE}-bundle:v${VERSION}
# Image URL to use all building/pushing image targets
IMG ?= controller:latest
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
-ENVTEST_K8S_VERSION = 1.34.0
+# Automatically derive the version from go.mod
+ENVTEST_VERSION := $(shell go list -m -f "{{ .Version }}" sigs.k8s.io/controller-runtime | awk -F'[v.]' '{printf "release-%d.%d", $$2, $$3}')
+ENVTEST_K8S_VERSION := $(shell go list -m -f "{{ .Version }}" k8s.io/api | awk -F'[v.]' '{printf "1.%d", $$3}')
ignore-not-found ?= True
@@ -135,7 +137,7 @@ scheck: ## Run static check against code
vet: setup/ginkgo ## Run go vet against code.
go vet ./...
-test: manifests generate fmt vet envtest ## Run tests.
+test: manifests generate fmt vet setup-envtest ## Run tests.
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use ${ENVTEST_K8S_VERSION} --bin-dir $(LOCALBIN) -p path)" ginkgo --junit-report=unit_test.xml --output-dir=`pwd` -vv --trace --keep-going --timeout=3h --cover --covermode=count --coverprofile=coverage.out ./pkg/splunk/common ./pkg/splunk/enterprise ./pkg/splunk/client ./pkg/splunk/util ./internal/controller ./pkg/splunk/splkcontroller
@@ -167,12 +169,12 @@ docker-push: ## Push docker image with the manager.
# Defaults:
# Build Platform: linux/amd64,linux/arm64
# Build Base OS: registry.access.redhat.com/ubi8/ubi-minimal
-# Build Base OS Version: 8.10-1761032271
+# Build Base OS Version: 8.10-1770223153
# Pass only what is required, the rest will be defaulted
# Setup defaults for build arguments
PLATFORMS ?= linux/amd64,linux/arm64
BASE_IMAGE ?= registry.access.redhat.com/ubi8/ubi-minimal
-BASE_IMAGE_VERSION ?= 8.10-1761032271
+BASE_IMAGE_VERSION ?= 8.10-1770223153
docker-buildx:
@if [ -z "${IMG}" ]; then \
@@ -206,6 +208,7 @@ deploy: manifests kustomize uninstall ## Deploy controller to the K8s cluster sp
$(SED) "s/value: WATCH_NAMESPACE_VALUE/value: \"${WATCH_NAMESPACE}\"/g" config/${ENVIRONMENT}/kustomization.yaml
$(SED) "s|SPLUNK_ENTERPRISE_IMAGE|${SPLUNK_ENTERPRISE_IMAGE}|g" config/${ENVIRONMENT}/kustomization.yaml
$(SED) "s/value: SPLUNK_GENERAL_TERMS_VALUE/value: \"${SPLUNK_GENERAL_TERMS}\"/g" config/${ENVIRONMENT}/kustomization.yaml
+ $(SED) 's/\("sokVersion": \)"[^"]*"/\1"$(VERSION)"/' config/manager/controller_manager_telemetry.yaml
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
RELATED_IMAGE_SPLUNK_ENTERPRISE=${SPLUNK_ENTERPRISE_IMAGE} WATCH_NAMESPACE=${WATCH_NAMESPACE} SPLUNK_GENERAL_TERMS=${SPLUNK_GENERAL_TERMS} $(KUSTOMIZE) build config/${ENVIRONMENT} | kubectl apply --server-side --force-conflicts -f -
$(SED) "s/namespace: ${NAMESPACE}/namespace: splunk-operator/g" config/${ENVIRONMENT}/kustomization.yaml
@@ -224,6 +227,7 @@ $(LOCALBIN):
## Tool Versions
KUSTOMIZE_VERSION ?= v5.4.3
CONTROLLER_TOOLS_VERSION ?= v0.18.0
+GOLANGCI_LINT_VERSION ?= v2.1.0
CONTROLLER_GEN = $(LOCALBIN)/controller-gen
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
@@ -241,8 +245,45 @@ envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
$(ENVTEST): $(LOCALBIN)
test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
+.PHONY: setup-envtest
+setup-envtest: envtest ## Set up ENVTEST binaries for the correct version
+ @$(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path || { \
+ echo "Error setting up envtest"; exit 1; }
+
+GOLANGCI_LINT = $(LOCALBIN)/golangci-lint
+golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
+$(GOLANGCI_LINT): $(LOCALBIN)
+ $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION))
+
+.PHONY: lint
+lint: golangci-lint ## Run golangci-lint linter
+ $(GOLANGCI_LINT) run
+
+.PHONY: lint-fix
+lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes
+ $(GOLANGCI_LINT) run --fix
+
+.PHONY: lint-config
+lint-config: golangci-lint ## Verify golangci-lint linter configuration
+ $(GOLANGCI_LINT) config verify
+
+# go-install-tool will 'go install' any package with custom target and target binary name
+# $1 - target path with name of binary
+# $2 - package url which can be installed
+# $3 - specific version of package
+define go-install-tool
+@[ -f "$(1)-$(3)" ] || { \
+set -e; \
+package=$(2)@$(3) ;\
+echo "Downloading $${package}" ;\
+rm -f $(1) || true ;\
+GOBIN=$(LOCALBIN) go install $${package} ;\
+mv $(1) $(1)-$(3) ;\
+} ;\
+ln -sf $(1)-$(3) $(1)
+endef
+
## Generate bundle manifests and metadata, then validate generated files.
-## In addition, copy the newly generated crd files to helm crds.
.PHONY: bundle
bundle: manifests kustomize
operator-sdk generate kustomize manifests -q
@@ -253,7 +294,6 @@ bundle: manifests kustomize
$(KUSTOMIZE) build config/manifests | operator-sdk generate bundle ${BUNDLE_GEN_FLAGS}
operator-sdk bundle validate ./bundle
operator-sdk bundle validate bundle --select-optional suite=operatorframework
- cp bundle/manifests/enterprise.splunk.com* helm-chart/splunk-operator/crds
.PHONY: bundle-build
bundle-build: ## Build the bundle image.
@@ -272,7 +312,7 @@ ifeq (,$(shell which opm 2>/dev/null))
set -e ;\
mkdir -p $(dir $(OPM)) ;\
OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
- curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.24.2/$${OS}-$${ARCH}-opm ;\
+ curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.55.0/$${OS}-$${ARCH}-opm ;\
chmod +x $(OPM) ;\
}
else
@@ -354,6 +394,7 @@ run_clair_scan:
# generate artifacts needed to deploy operator, this is current way of doing it, need to fix this
generate-artifacts-namespace: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
+ $(SED) 's/\("sokVersion": \)"[^"]*"/\1"$(VERSION)"/' config/manager/controller_manager_telemetry.yaml
mkdir -p release-${VERSION}
cp config/default/kustomization-namespace.yaml config/default/kustomization.yaml
cp config/rbac/kustomization-namespace.yaml config/rbac/kustomization.yaml
@@ -369,6 +410,7 @@ generate-artifacts-namespace: manifests kustomize ## Deploy controller to the K8
# generate artifacts needed to deploy operator, this is current way of doing it, need to fix this
generate-artifacts-cluster: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
+ $(SED) 's/\("sokVersion": \)"[^"]*"/\1"$(VERSION)"/' config/manager/controller_manager_telemetry.yaml
mkdir -p release-${VERSION}
cp config/default/kustomization-cluster.yaml config/default/kustomization.yaml
cp config/rbac/kustomization-cluster.yaml config/rbac/kustomization.yaml
@@ -428,4 +470,4 @@ setup/ginkgo:
build-installer: manifests generate kustomize
mkdir -p dist
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
- $(KUSTOMIZE) build config/default > dist/install.yaml
\ No newline at end of file
+ $(KUSTOMIZE) build config/default > dist/install.yaml
diff --git a/PROJECT b/PROJECT
index 62abf2007..e87979069 100644
--- a/PROJECT
+++ b/PROJECT
@@ -1,3 +1,7 @@
+# Code generated by tool. DO NOT EDIT.
+# This file is used to track the info used to scaffold your project
+# and allow the plugins properly work.
+# More info: https://book.kubebuilder.io/reference/project-config.html
domain: splunk.com
layout:
- go.kubebuilder.io/v4
@@ -109,4 +113,31 @@ resources:
kind: LicenseManager
path: github.com/splunk/splunk-operator/api/v4
version: v4
+- api:
+ crdVersion: v1
+ namespaced: true
+ controller: true
+ domain: splunk.com
+ group: enterprise
+ kind: IngestorCluster
+ path: github.com/splunk/splunk-operator/api/v4
+ version: v4
+- api:
+ crdVersion: v1
+ namespaced: true
+ controller: true
+ domain: splunk.com
+ group: enterprise
+ kind: Queue
+ path: github.com/splunk/splunk-operator/api/v4
+ version: v4
+- api:
+ crdVersion: v1
+ namespaced: true
+ controller: true
+ domain: splunk.com
+ group: enterprise
+ kind: ObjectStorage
+ path: github.com/splunk/splunk-operator/api/v4
+ version: v4
version: "3"
diff --git a/api/v4/clustermanager_types.go b/api/v4/clustermanager_types.go
index d55f6e678..bf5c6d4fb 100644
--- a/api/v4/clustermanager_types.go
+++ b/api/v4/clustermanager_types.go
@@ -68,7 +68,7 @@ type ClusterManagerStatus struct {
// Telemetry App installation flag
TelAppInstalled bool `json:"telAppInstalled"`
- // Auxillary message describing CR status
+ // Auxiliary message describing CR status
Message string `json:"message"`
}
@@ -90,7 +90,7 @@ type BundlePushInfo struct {
// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".status.replicas",description="Desired number of indexer peers"
// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Current number of ready indexer peers"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of cluster manager"
-// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxiliary message describing CR status"
// +kubebuilder:storageversion
type ClusterManager struct {
metav1.TypeMeta `json:",inline"`
diff --git a/api/v4/common_types.go b/api/v4/common_types.go
index 5bba9c0cd..78b6a3bd7 100644
--- a/api/v4/common_types.go
+++ b/api/v4/common_types.go
@@ -91,8 +91,8 @@ type Spec struct {
// Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables)
Image string `json:"image"`
- // Sets pull policy for all images (either “Always” or the default: “IfNotPresent”)
- // +kubebuilder:validation:Enum=Always;IfNotPresent
+ // Sets pull policy for all images ("Always", "Never", or the default: "IfNotPresent")
+ // +kubebuilder:validation:Enum=Always;Never;IfNotPresent
ImagePullPolicy string `json:"imagePullPolicy"`
// Name of Scheduler to use for pod placement (defaults to “default-scheduler”)
@@ -137,7 +137,7 @@ const (
// PhaseTerminating means a custom resource is in the process of being removed
PhaseTerminating Phase = "Terminating"
- // PhaseError means an error occured with custom resource management
+ // PhaseError means an error occurred with custom resource management
PhaseError Phase = "Error"
)
@@ -318,7 +318,7 @@ type VolumeSpec struct {
Region string `json:"region"`
}
-// VolumeAndTypeSpec used to add any custom varaibles for volume implementation
+// VolumeAndTypeSpec used to add any custom variables for volume implementation
type VolumeAndTypeSpec struct {
VolumeSpec `json:",inline"`
}
@@ -375,7 +375,7 @@ type AppSourceDefaultSpec struct {
// PremiumAppsProps represents properties for premium apps such as ES
type PremiumAppsProps struct {
- // Type: enterpriseSecurity for now, can accomodate itsi etc.. later
+ // Type: enterpriseSecurity for now, can accommodate itsi etc.. later
// +optional
Type string `json:"type,omitempty"`
@@ -483,7 +483,7 @@ type AppSrcDeployInfo struct {
type BundlePushStageType int
const (
- // BundlePushUninitialized indicates bundle push never happend
+ // BundlePushUninitialized indicates bundle push never happened
BundlePushUninitialized BundlePushStageType = iota
// BundlePushPending waiting for all the apps to be copied to the Pod
BundlePushPending
diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go
index 9bb7b31a8..537f2863a 100644
--- a/api/v4/indexercluster_types.go
+++ b/api/v4/indexercluster_types.go
@@ -34,10 +34,21 @@ const (
IndexerClusterPausedAnnotation = "indexercluster.enterprise.splunk.com/paused"
)
+// +kubebuilder:validation:XValidation:rule="has(self.queueRef) == has(self.objectStorageRef)",message="queueRef and objectStorageRef must both be set or both be empty"
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.queueRef) || self.queueRef == oldSelf.queueRef",message="queueRef is immutable once created"
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.objectStorageRef) || self.objectStorageRef == oldSelf.objectStorageRef",message="objectStorageRef is immutable once created"
// IndexerClusterSpec defines the desired state of a Splunk Enterprise indexer cluster
type IndexerClusterSpec struct {
CommonSplunkSpec `json:",inline"`
+ // +optional
+ // Queue reference
+ QueueRef corev1.ObjectReference `json:"queueRef"`
+
+ // +optional
+ // Object Storage reference
+ ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"`
+
// Number of search head pods; a search head cluster will be created if > 1
Replicas int32 `json:"replicas"`
}
@@ -109,8 +120,14 @@ type IndexerClusterStatus struct {
// status of each indexer cluster peer
Peers []IndexerClusterMemberStatus `json:"peers"`
- // Auxillary message describing CR status
+ // Auxiliary message describing CR status
Message string `json:"message"`
+
+ // Credential secret version to track changes to the secret and trigger rolling restart of indexer cluster peers when the secret is updated
+ CredentialSecretVersion string `json:"credentialSecretVersion,omitempty"`
+
+ // Service account to track changes to the service account and trigger rolling restart of indexer cluster peers when the service account is updated
+ ServiceAccount string `json:"serviceAccount,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -126,7 +143,7 @@ type IndexerClusterStatus struct {
// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".status.replicas",description="Desired number of indexer peers"
// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Current number of ready indexer peers"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of indexer cluster"
-// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxiliary message describing CR status"
// +kubebuilder:storageversion
type IndexerCluster struct {
metav1.TypeMeta `json:",inline"`
diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go
new file mode 100644
index 000000000..4e206e76a
--- /dev/null
+++ b/api/v4/ingestorcluster_types.go
@@ -0,0 +1,158 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v4
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+const (
+ // IngestorClusterPausedAnnotation is the annotation that pauses the reconciliation (triggers
+ // an immediate requeue)
+ IngestorClusterPausedAnnotation = "ingestorcluster.enterprise.splunk.com/paused"
+)
+
+// +kubebuilder:validation:XValidation:rule="self.queueRef == oldSelf.queueRef",message="queueRef is immutable once created"
+// +kubebuilder:validation:XValidation:rule="self.objectStorageRef == oldSelf.objectStorageRef",message="objectStorageRef is immutable once created"
+// IngestorClusterSpec defines the spec of Ingestor Cluster
+type IngestorClusterSpec struct {
+ // Common Splunk spec
+ CommonSplunkSpec `json:",inline"`
+
+ // Number of ingestor pods
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:default=1
+ Replicas int32 `json:"replicas"`
+
+ // Splunk Enterprise app repository that specifies remote app location and scope for Splunk app management
+ AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"`
+
+ // +kubebuilder:validation:Required
+ // Queue reference
+ QueueRef corev1.ObjectReference `json:"queueRef"`
+
+ // +kubebuilder:validation:Required
+ // Object Storage reference
+ ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"`
+}
+
+// IngestorClusterStatus defines the observed state of Ingestor Cluster
+type IngestorClusterStatus struct {
+ // Phase of the ingestor pods
+ Phase Phase `json:"phase"`
+
+ // Number of desired ingestor pods
+ Replicas int32 `json:"replicas"`
+
+ // Number of ready ingestor pods
+ ReadyReplicas int32 `json:"readyReplicas"`
+
+ // Selector for pods used by HorizontalPodAutoscaler
+ Selector string `json:"selector"`
+
+ // Resource revision tracker
+ ResourceRevMap map[string]string `json:"resourceRevMap"`
+
+ // App Framework context
+ AppContext AppDeploymentContext `json:"appContext"`
+
+ // Telemetry App installation flag
+ TelAppInstalled bool `json:"telAppInstalled"`
+
+	// Auxiliary message describing CR status
+ Message string `json:"message"`
+
+	// Credential secret version to track changes to the secret and trigger rolling restart of ingestor pods when the secret is updated
+	CredentialSecretVersion string `json:"credentialSecretVersion,omitempty"`
+
+	// Service account to track changes to the service account and trigger rolling restart of ingestor pods when the service account is updated
+	ServiceAccount string `json:"serviceAccount,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// IngestorCluster is the Schema for a Splunk Enterprise ingestor cluster pods
+// +k8s:openapi-gen=true
+// +kubebuilder:subresource:status
+// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector
+// +kubebuilder:resource:path=ingestorclusters,scope=Namespaced,shortName=ing
+// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of ingestor cluster pods"
+// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".status.replicas",description="Number of desired ingestor cluster pods"
+// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Current number of ready ingestor cluster pods"
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of ingestor cluster resource"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxiliary message describing CR status"
+// +kubebuilder:storageversion
+
+// IngestorCluster is the Schema for the ingestorclusters API
+type IngestorCluster struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty,omitzero"`
+
+ Spec IngestorClusterSpec `json:"spec"`
+ Status IngestorClusterStatus `json:"status,omitempty,omitzero"`
+}
+
+// DeepCopyObject implements runtime.Object
+func (in *IngestorCluster) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// +kubebuilder:object:root=true
+
+// IngestorClusterList contains a list of IngestorCluster
+type IngestorClusterList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []IngestorCluster `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&IngestorCluster{}, &IngestorClusterList{})
+}
+
+// NewEvent creates a new event associated with the object and ready
+// to be published to Kubernetes API
+func (ic *IngestorCluster) NewEvent(eventType, reason, message string) corev1.Event {
+ t := metav1.Now()
+ return corev1.Event{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: reason + "-",
+ Namespace: ic.ObjectMeta.Namespace,
+ },
+ InvolvedObject: corev1.ObjectReference{
+ Kind: "IngestorCluster",
+ Namespace: ic.Namespace,
+ Name: ic.Name,
+ UID: ic.UID,
+ APIVersion: GroupVersion.String(),
+ },
+ Reason: reason,
+ Message: message,
+ Source: corev1.EventSource{
+ Component: "splunk-ingestor-cluster-controller",
+ },
+ FirstTimestamp: t,
+ LastTimestamp: t,
+ Count: 1,
+ Type: eventType,
+ ReportingController: "enterprise.splunk.com/ingestor-cluster-controller",
+ }
+}
diff --git a/api/v4/licensemanager_types.go b/api/v4/licensemanager_types.go
index e7a33c720..d48b221b3 100644
--- a/api/v4/licensemanager_types.go
+++ b/api/v4/licensemanager_types.go
@@ -53,7 +53,7 @@ type LicenseManagerStatus struct {
// Telemetry App installation flag
TelAppInstalled bool `json:"telAppInstalled"`
- // Auxillary message describing CR status
+ // Auxiliary message describing CR status
Message string `json:"message"`
}
@@ -65,7 +65,7 @@ type LicenseManagerStatus struct {
// +kubebuilder:resource:path=licensemanagers,scope=Namespaced,shortName=lmanager
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of license manager"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of license manager"
-// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxiliary message describing CR status"
// +kubebuilder:storageversion
type LicenseManager struct {
metav1.TypeMeta `json:",inline"`
diff --git a/api/v4/monitoringconsole_types.go b/api/v4/monitoringconsole_types.go
index 219d35dbc..90561eb2e 100644
--- a/api/v4/monitoringconsole_types.go
+++ b/api/v4/monitoringconsole_types.go
@@ -59,7 +59,7 @@ type MonitoringConsoleStatus struct {
// App Framework status
AppContext AppDeploymentContext `json:"appContext,omitempty"`
- // Auxillary message describing CR status
+ // Auxiliary message describing CR status
Message string `json:"message"`
}
@@ -73,7 +73,7 @@ type MonitoringConsoleStatus struct {
// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".status.replicas",description="Desired number of monitoring console members"
// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Current number of ready monitoring console members"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of monitoring console"
-// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxiliary message describing CR status"
// +kubebuilder:storageversion
type MonitoringConsole struct {
metav1.TypeMeta `json:",inline"`
diff --git a/api/v4/objectstorage_types.go b/api/v4/objectstorage_types.go
new file mode 100644
index 000000000..cfdda5a86
--- /dev/null
+++ b/api/v4/objectstorage_types.go
@@ -0,0 +1,107 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v4
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+const (
+ // ObjectStoragePausedAnnotation is the annotation that pauses the reconciliation (triggers
+ // an immediate requeue)
+ ObjectStoragePausedAnnotation = "objectstorage.enterprise.splunk.com/paused"
+)
+
+// +kubebuilder:validation:XValidation:rule="self.provider == oldSelf.provider",message="provider is immutable once created"
+// +kubebuilder:validation:XValidation:rule="self.s3 == oldSelf.s3",message="s3 is immutable once created"
+// +kubebuilder:validation:XValidation:rule="self.provider != 's3' || has(self.s3)",message="s3 must be provided when provider is s3"
+// ObjectStorageSpec defines the desired state of ObjectStorage
+type ObjectStorageSpec struct {
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum=s3
+	// Provider of object storage resources
+ Provider string `json:"provider"`
+
+ // +kubebuilder:validation:Required
+ // s3 specific inputs
+ S3 S3Spec `json:"s3"`
+}
+
+type S3Spec struct {
+ // +optional
+ // +kubebuilder:validation:Pattern=`^https?://[^\s/$.?#].[^\s]*$`
+ // S3-compatible Service endpoint
+ Endpoint string `json:"endpoint"`
+
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^(?:s3://)?[a-z0-9.-]{3,63}(?:/[^\s]+)?$`
+ // S3 bucket path
+ Path string `json:"path"`
+}
+
+// ObjectStorageStatus defines the observed state of ObjectStorage.
+type ObjectStorageStatus struct {
+ // Phase of the object storage
+ Phase Phase `json:"phase"`
+
+ // Resource revision tracker
+ ResourceRevMap map[string]string `json:"resourceRevMap"`
+
+	// Auxiliary message describing CR status
+ Message string `json:"message"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// ObjectStorage is the Schema for a Splunk Enterprise object storage
+// +k8s:openapi-gen=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=objectstorages,scope=Namespaced,shortName=os
+// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of object storage"
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of object storage resource"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxiliary message describing CR status"
+// +kubebuilder:storageversion
+
+// ObjectStorage is the Schema for the objectstorages API
+type ObjectStorage struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty,omitzero"`
+
+ Spec ObjectStorageSpec `json:"spec"`
+ Status ObjectStorageStatus `json:"status,omitempty,omitzero"`
+}
+
+// DeepCopyObject implements runtime.Object
+func (in *ObjectStorage) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// +kubebuilder:object:root=true
+
+// ObjectStorageList contains a list of ObjectStorage
+type ObjectStorageList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []ObjectStorage `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&ObjectStorage{}, &ObjectStorageList{})
+}
diff --git a/api/v4/queue_types.go b/api/v4/queue_types.go
new file mode 100644
index 000000000..b86bd23a9
--- /dev/null
+++ b/api/v4/queue_types.go
@@ -0,0 +1,124 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v4
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+const (
+ // QueuePausedAnnotation is the annotation that pauses the reconciliation (triggers
+ // an immediate requeue)
+ QueuePausedAnnotation = "queue.enterprise.splunk.com/paused"
+)
+
+// +kubebuilder:validation:XValidation:rule="self.provider == oldSelf.provider",message="provider is immutable once created"
+// +kubebuilder:validation:XValidation:rule="self.sqs.name == oldSelf.sqs.name",message="sqs.name is immutable once created"
+// +kubebuilder:validation:XValidation:rule="self.sqs.authRegion == oldSelf.sqs.authRegion",message="sqs.authRegion is immutable once created"
+// +kubebuilder:validation:XValidation:rule="self.sqs.dlq == oldSelf.sqs.dlq",message="sqs.dlq is immutable once created"
+// +kubebuilder:validation:XValidation:rule="self.sqs.endpoint == oldSelf.sqs.endpoint",message="sqs.endpoint is immutable once created"
+// +kubebuilder:validation:XValidation:rule="(self.provider != 'sqs' && self.provider != 'sqs_cp') || has(self.sqs)",message="sqs must be provided when provider is sqs or sqs_cp"
+// QueueSpec defines the desired state of Queue
+type QueueSpec struct {
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum=sqs;sqs_cp
+ // Provider of queue resources
+ Provider string `json:"provider"`
+
+ // +kubebuilder:validation:Required
+ // sqs specific inputs
+ SQS SQSSpec `json:"sqs"`
+}
+
+type SQSSpec struct {
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // Name of the queue
+ Name string `json:"name"`
+
+ // +optional
+ // +kubebuilder:validation:Pattern=`^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$`
+ // Auth Region of the resources
+ AuthRegion string `json:"authRegion"`
+
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // Name of the dead letter queue resource
+ DLQ string `json:"dlq"`
+
+ // +optional
+ // +kubebuilder:validation:Pattern=`^https?://[^\s/$.?#].[^\s]*$`
+ // Amazon SQS Service endpoint
+ Endpoint string `json:"endpoint"`
+
+ // +optional
+ // List of remote storage volumes
+ VolList []VolumeSpec `json:"volumes,omitempty"`
+}
+
+// QueueStatus defines the observed state of Queue
+type QueueStatus struct {
+ // Phase of the queue
+ Phase Phase `json:"phase"`
+
+ // Resource revision tracker
+ ResourceRevMap map[string]string `json:"resourceRevMap"`
+
+	// Auxiliary message describing CR status
+ Message string `json:"message"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// Queue is the Schema for a Splunk Enterprise queue
+// +k8s:openapi-gen=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=queues,scope=Namespaced,shortName=queue
+// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of queue"
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of queue resource"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxiliary message describing CR status"
+// +kubebuilder:storageversion
+
+// Queue is the Schema for the queues API
+type Queue struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty,omitzero"`
+
+ Spec QueueSpec `json:"spec"`
+ Status QueueStatus `json:"status,omitempty,omitzero"`
+}
+
+// DeepCopyObject implements runtime.Object
+func (in *Queue) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// +kubebuilder:object:root=true
+
+// QueueList contains a list of Queue
+type QueueList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Queue `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&Queue{}, &QueueList{})
+}
diff --git a/api/v4/searchheadcluster_types.go b/api/v4/searchheadcluster_types.go
index 67bdd24ba..00ea9febe 100644
--- a/api/v4/searchheadcluster_types.go
+++ b/api/v4/searchheadcluster_types.go
@@ -126,7 +126,7 @@ type SearchHeadClusterStatus struct {
// Telemetry App installation flag
TelAppInstalled bool `json:"telAppInstalled"`
- // Auxillary message describing CR status
+ // Auxiliary message describing CR status
Message string `json:"message"`
UpgradePhase UpgradePhase `json:"upgradePhase"`
@@ -154,7 +154,7 @@ const (
// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".status.replicas",description="Desired number of search head cluster members"
// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Current number of ready search head cluster members"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of search head cluster"
-// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxiliary message describing CR status"
// +kubebuilder:storageversion
type SearchHeadCluster struct {
metav1.TypeMeta `json:",inline"`
diff --git a/api/v4/standalone_types.go b/api/v4/standalone_types.go
index 45220958c..f19c09b5c 100644
--- a/api/v4/standalone_types.go
+++ b/api/v4/standalone_types.go
@@ -74,7 +74,7 @@ type StandaloneStatus struct {
// Telemetry App installation flag
TelAppInstalled bool `json:"telAppInstalled"`
- // Auxillary message describing CR status
+ // Auxiliary message describing CR status
Message string `json:"message"`
}
@@ -89,7 +89,7 @@ type StandaloneStatus struct {
// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".status.replicas",description="Number of desired standalone instances"
// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Current number of ready standalone instances"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of standalone resource"
-// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxiliary message describing CR status"
// +kubebuilder:storageversion
type Standalone struct {
metav1.TypeMeta `json:",inline"`
diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go
index 93e988463..c7759fa58 100644
--- a/api/v4/zz_generated.deepcopy.go
+++ b/api/v4/zz_generated.deepcopy.go
@@ -22,7 +22,7 @@ package v4
import (
"k8s.io/api/core/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -511,6 +511,8 @@ func (in *IndexerClusterMemberStatus) DeepCopy() *IndexerClusterMemberStatus {
func (in *IndexerClusterSpec) DeepCopyInto(out *IndexerClusterSpec) {
*out = *in
in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec)
+ out.QueueRef = in.QueueRef
+ out.ObjectStorageRef = in.ObjectStorageRef
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterSpec.
@@ -555,6 +557,99 @@ func (in *IndexerClusterStatus) DeepCopy() *IndexerClusterStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngestorCluster) DeepCopyInto(out *IngestorCluster) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorCluster.
+func (in *IngestorCluster) DeepCopy() *IngestorCluster {
+ if in == nil {
+ return nil
+ }
+ out := new(IngestorCluster)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngestorClusterList) DeepCopyInto(out *IngestorClusterList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]IngestorCluster, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterList.
+func (in *IngestorClusterList) DeepCopy() *IngestorClusterList {
+ if in == nil {
+ return nil
+ }
+ out := new(IngestorClusterList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IngestorClusterList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngestorClusterSpec) DeepCopyInto(out *IngestorClusterSpec) {
+ *out = *in
+ in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec)
+ in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig)
+ out.QueueRef = in.QueueRef
+ out.ObjectStorageRef = in.ObjectStorageRef
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterSpec.
+func (in *IngestorClusterSpec) DeepCopy() *IngestorClusterSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IngestorClusterSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngestorClusterStatus) DeepCopyInto(out *IngestorClusterStatus) {
+ *out = *in
+ if in.ResourceRevMap != nil {
+ in, out := &in.ResourceRevMap, &out.ResourceRevMap
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ in.AppContext.DeepCopyInto(&out.AppContext)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterStatus.
+func (in *IngestorClusterStatus) DeepCopy() *IngestorClusterStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(IngestorClusterStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LicenseManager) DeepCopyInto(out *LicenseManager) {
*out = *in
@@ -747,6 +842,95 @@ func (in *MonitoringConsoleStatus) DeepCopy() *MonitoringConsoleStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectStorage) DeepCopyInto(out *ObjectStorage) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorage.
+func (in *ObjectStorage) DeepCopy() *ObjectStorage {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectStorage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectStorageList) DeepCopyInto(out *ObjectStorageList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ObjectStorage, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageList.
+func (in *ObjectStorageList) DeepCopy() *ObjectStorageList {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectStorageList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ObjectStorageList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectStorageSpec) DeepCopyInto(out *ObjectStorageSpec) {
+ *out = *in
+ out.S3 = in.S3
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSpec.
+func (in *ObjectStorageSpec) DeepCopy() *ObjectStorageSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectStorageSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectStorageStatus) DeepCopyInto(out *ObjectStorageStatus) {
+ *out = *in
+ if in.ResourceRevMap != nil {
+ in, out := &in.ResourceRevMap, &out.ResourceRevMap
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageStatus.
+func (in *ObjectStorageStatus) DeepCopy() *ObjectStorageStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectStorageStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PhaseInfo) DeepCopyInto(out *PhaseInfo) {
*out = *in
@@ -793,6 +977,130 @@ func (in *Probe) DeepCopy() *Probe {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Queue) DeepCopyInto(out *Queue) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Queue.
+func (in *Queue) DeepCopy() *Queue {
+ if in == nil {
+ return nil
+ }
+ out := new(Queue)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueueList) DeepCopyInto(out *QueueList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Queue, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueList.
+func (in *QueueList) DeepCopy() *QueueList {
+ if in == nil {
+ return nil
+ }
+ out := new(QueueList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *QueueList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueueSpec) DeepCopyInto(out *QueueSpec) {
+ *out = *in
+ in.SQS.DeepCopyInto(&out.SQS)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueSpec.
+func (in *QueueSpec) DeepCopy() *QueueSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(QueueSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueueStatus) DeepCopyInto(out *QueueStatus) {
+ *out = *in
+ if in.ResourceRevMap != nil {
+ in, out := &in.ResourceRevMap, &out.ResourceRevMap
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueStatus.
+func (in *QueueStatus) DeepCopy() *QueueStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(QueueStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *S3Spec) DeepCopyInto(out *S3Spec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Spec.
+func (in *S3Spec) DeepCopy() *S3Spec {
+ if in == nil {
+ return nil
+ }
+ out := new(S3Spec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SQSSpec) DeepCopyInto(out *SQSSpec) {
+ *out = *in
+ if in.VolList != nil {
+ in, out := &in.VolList, &out.VolList
+ *out = make([]VolumeSpec, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQSSpec.
+func (in *SQSSpec) DeepCopy() *SQSSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SQSSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SearchHeadCluster) DeepCopyInto(out *SearchHeadCluster) {
*out = *in
diff --git a/bundle/manifests/enterprise.splunk.com_clustermanagers.yaml b/bundle/manifests/enterprise.splunk.com_clustermanagers.yaml
index 343506c50..06bf024fd 100644
--- a/bundle/manifests/enterprise.splunk.com_clustermanagers.yaml
+++ b/bundle/manifests/enterprise.splunk.com_clustermanagers.yaml
@@ -6,6 +6,7 @@ metadata:
creationTimestamp: null
labels:
name: splunk-operator
+ app.kubernetes.io/name: splunk-operator
name: clustermanagers.enterprise.splunk.com
spec:
group: enterprise.splunk.com
diff --git a/bundle/manifests/enterprise.splunk.com_clustermasters.yaml b/bundle/manifests/enterprise.splunk.com_clustermasters.yaml
index 1bca4aa49..e08162aba 100644
--- a/bundle/manifests/enterprise.splunk.com_clustermasters.yaml
+++ b/bundle/manifests/enterprise.splunk.com_clustermasters.yaml
@@ -6,6 +6,7 @@ metadata:
creationTimestamp: null
labels:
name: splunk-operator
+ app.kubernetes.io/name: splunk-operator
name: clustermasters.enterprise.splunk.com
spec:
group: enterprise.splunk.com
diff --git a/bundle/manifests/enterprise.splunk.com_indexerclusters.yaml b/bundle/manifests/enterprise.splunk.com_indexerclusters.yaml
index 551ee9c96..21698c34f 100644
--- a/bundle/manifests/enterprise.splunk.com_indexerclusters.yaml
+++ b/bundle/manifests/enterprise.splunk.com_indexerclusters.yaml
@@ -6,6 +6,7 @@ metadata:
creationTimestamp: null
labels:
name: splunk-operator
+ app.kubernetes.io/name: splunk-operator
name: indexerclusters.enterprise.splunk.com
spec:
group: enterprise.splunk.com
diff --git a/bundle/manifests/enterprise.splunk.com_licensemanagers.yaml b/bundle/manifests/enterprise.splunk.com_licensemanagers.yaml
index 82b3b6743..5830ce70e 100644
--- a/bundle/manifests/enterprise.splunk.com_licensemanagers.yaml
+++ b/bundle/manifests/enterprise.splunk.com_licensemanagers.yaml
@@ -6,6 +6,7 @@ metadata:
creationTimestamp: null
labels:
name: splunk-operator
+ app.kubernetes.io/name: splunk-operator
name: licensemanagers.enterprise.splunk.com
spec:
group: enterprise.splunk.com
diff --git a/bundle/manifests/enterprise.splunk.com_licensemasters.yaml b/bundle/manifests/enterprise.splunk.com_licensemasters.yaml
index 9ef2b9e0b..4e924d672 100644
--- a/bundle/manifests/enterprise.splunk.com_licensemasters.yaml
+++ b/bundle/manifests/enterprise.splunk.com_licensemasters.yaml
@@ -6,6 +6,7 @@ metadata:
creationTimestamp: null
labels:
name: splunk-operator
+ app.kubernetes.io/name: splunk-operator
name: licensemasters.enterprise.splunk.com
spec:
group: enterprise.splunk.com
diff --git a/bundle/manifests/enterprise.splunk.com_monitoringconsoles.yaml b/bundle/manifests/enterprise.splunk.com_monitoringconsoles.yaml
index 7b36dc27f..10ccbdf39 100644
--- a/bundle/manifests/enterprise.splunk.com_monitoringconsoles.yaml
+++ b/bundle/manifests/enterprise.splunk.com_monitoringconsoles.yaml
@@ -6,6 +6,7 @@ metadata:
creationTimestamp: null
labels:
name: splunk-operator
+ app.kubernetes.io/name: splunk-operator
name: monitoringconsoles.enterprise.splunk.com
spec:
group: enterprise.splunk.com
diff --git a/bundle/manifests/enterprise.splunk.com_searchheadclusters.yaml b/bundle/manifests/enterprise.splunk.com_searchheadclusters.yaml
index f280f9e94..0d2721941 100644
--- a/bundle/manifests/enterprise.splunk.com_searchheadclusters.yaml
+++ b/bundle/manifests/enterprise.splunk.com_searchheadclusters.yaml
@@ -6,6 +6,7 @@ metadata:
creationTimestamp: null
labels:
name: splunk-operator
+ app.kubernetes.io/name: splunk-operator
name: searchheadclusters.enterprise.splunk.com
spec:
group: enterprise.splunk.com
diff --git a/bundle/manifests/enterprise.splunk.com_standalones.yaml b/bundle/manifests/enterprise.splunk.com_standalones.yaml
index ff14c2184..17b191359 100644
--- a/bundle/manifests/enterprise.splunk.com_standalones.yaml
+++ b/bundle/manifests/enterprise.splunk.com_standalones.yaml
@@ -6,6 +6,7 @@ metadata:
creationTimestamp: null
labels:
name: splunk-operator
+ app.kubernetes.io/name: splunk-operator
name: standalones.enterprise.splunk.com
spec:
group: enterprise.splunk.com
diff --git a/bundle/manifests/splunk-operator.clusterserviceversion.yaml b/bundle/manifests/splunk-operator.clusterserviceversion.yaml
index 9ee619ae5..e546cc0ee 100644
--- a/bundle/manifests/splunk-operator.clusterserviceversion.yaml
+++ b/bundle/manifests/splunk-operator.clusterserviceversion.yaml
@@ -351,6 +351,7 @@ spec:
labels:
control-plane: controller-manager
name: splunk-operator
+ app.kubernetes.io/name: splunk-operator
spec:
containers:
- args:
diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml
new file mode 100644
index 000000000..6ce642c7c
--- /dev/null
+++ b/config/certmanager/certificate.yaml
@@ -0,0 +1,38 @@
+# The following manifests contain a self-signed issuer CR and a certificate CR.
+# More document can be found at https://docs.cert-manager.io
+# WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes.
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ labels:
+ app.kubernetes.io/name: issuer
+ app.kubernetes.io/instance: selfsigned-issuer
+ app.kubernetes.io/component: certificate
+ app.kubernetes.io/created-by: splunk-operator
+ app.kubernetes.io/part-of: splunk-operator
+ app.kubernetes.io/managed-by: kustomize
+ name: selfsigned-issuer
+ namespace: system
+spec:
+ selfSigned: {}
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ labels:
+ app.kubernetes.io/name: certificate
+ app.kubernetes.io/instance: serving-cert
+ app.kubernetes.io/component: certificate
+ app.kubernetes.io/created-by: splunk-operator
+ app.kubernetes.io/part-of: splunk-operator
+ app.kubernetes.io/managed-by: kustomize
+ name: serving-cert
+ namespace: system
+spec:
+ dnsNames:
+ - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc
+ - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local
+ issuerRef:
+ kind: Issuer
+ name: selfsigned-issuer
+ secretName: webhook-server-cert
diff --git a/config/certmanager/kustomization.yaml b/config/certmanager/kustomization.yaml
new file mode 100644
index 000000000..bebea5a59
--- /dev/null
+++ b/config/certmanager/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+- certificate.yaml
+
+configurations:
+- kustomizeconfig.yaml
diff --git a/config/certmanager/kustomizeconfig.yaml b/config/certmanager/kustomizeconfig.yaml
new file mode 100644
index 000000000..ebc9836b1
--- /dev/null
+++ b/config/certmanager/kustomizeconfig.yaml
@@ -0,0 +1,13 @@
+# This configuration is for teaching kustomize how to update name ref and var substitution
+nameReference:
+- kind: Issuer
+ group: cert-manager.io
+ fieldSpecs:
+ - kind: Certificate
+ group: cert-manager.io
+ path: spec/issuerRef/name
+
+varReference:
+- kind: Certificate
+ group: cert-manager.io
+ path: spec/dnsNames
diff --git a/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml b/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml
index c393cdfdc..0019d7504 100644
--- a/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml
+++ b/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml
@@ -37,7 +37,7 @@ spec:
jsonPath: .metadata.creationTimestamp
name: Age
type: date
- - description: Auxillary message describing CR status
+ - description: Auxiliary message describing CR status
jsonPath: .status.message
name: Message
type: string
@@ -354,7 +354,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -369,7 +368,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -535,7 +533,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -550,7 +547,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -713,7 +709,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -728,7 +723,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -894,7 +888,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -909,7 +902,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1037,7 +1029,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -1086,7 +1078,7 @@ spec:
type: string
type: object
type:
- description: 'Type: enterpriseSecurity for now, can accomodate
+ description: 'Type: enterpriseSecurity for now, can accommodate
itsi etc.. later'
type: string
type: object
@@ -1390,10 +1382,11 @@ spec:
environment variables)
type: string
imagePullPolicy:
- description: 'Sets pull policy for all images (either “Always” or
- the default: “IfNotPresent”)'
+ description: 'Sets pull policy for all images ("Always", "Never",
+ or the default: "IfNotPresent")'
enum:
- Always
+ - Never
- IfNotPresent
type: string
imagePullSecrets:
@@ -2026,13 +2019,12 @@ spec:
type: object
trafficDistribution:
description: |-
- TrafficDistribution offers a way to express preferences for how traffic is
- distributed to Service endpoints. Implementations can use this field as a
- hint, but are not required to guarantee strict adherence. If the field is
- not set, the implementation will apply its default routing strategy. If set
- to "PreferClose", implementations should prioritize endpoints that are
- topologically close (e.g., same zone).
- This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ TrafficDistribution offers a way to express preferences for how traffic
+ is distributed to Service endpoints. Implementations can use this field
+ as a hint, but are not required to guarantee strict adherence. If the
+ field is not set, the implementation will apply its default routing
+ strategy. If set to "PreferClose", implementations should prioritize
+ endpoints that are in the same zone.
type: string
type:
description: |-
@@ -2160,6 +2152,8 @@ spec:
Ports is a list of records of service ports
If used, every port defined in the service should have an entry in it
items:
+ description: PortStatus represents the error condition
+ of a service port
properties:
error:
description: |-
@@ -2500,7 +2494,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -2511,7 +2504,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -2581,6 +2573,8 @@ spec:
description: |-
awsElasticBlockStore represents an AWS Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
properties:
fsType:
@@ -2612,8 +2606,10 @@ spec:
- volumeID
type: object
azureDisk:
- description: azureDisk represents an Azure Data Disk mount on
- the host and bind mount to the pod.
+ description: |-
+ azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ are redirected to the disk.csi.azure.com CSI driver.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode: None,
@@ -2651,8 +2647,10 @@ spec:
- diskURI
type: object
azureFile:
- description: azureFile represents an Azure File Service mount
- on the host and bind mount to the pod.
+ description: |-
+ azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ are redirected to the file.csi.azure.com CSI driver.
properties:
readOnly:
description: |-
@@ -2671,8 +2669,9 @@ spec:
- shareName
type: object
cephfs:
- description: cephFS represents a Ceph FS mount on the host that
- shares a pod's lifetime
+ description: |-
+ cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
properties:
monitors:
description: |-
@@ -2724,6 +2723,8 @@ spec:
cinder:
description: |-
cinder represents a cinder volume attached and mounted on kubelets host machine.
+ Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ are redirected to the cinder.csi.openstack.org CSI driver.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
properties:
fsType:
@@ -2833,8 +2834,7 @@ spec:
x-kubernetes-map-type: atomic
csi:
description: csi (Container Storage Interface) represents ephemeral
- storage that is handled by certain external CSI drivers (Beta
- feature).
+ storage that is handled by certain external CSI drivers.
properties:
driver:
description: |-
@@ -3300,6 +3300,7 @@ spec:
description: |-
flexVolume represents a generic volume resource that is
provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
properties:
driver:
description: driver is the name of the driver to use for
@@ -3345,9 +3346,9 @@ spec:
- driver
type: object
flocker:
- description: flocker represents a Flocker volume attached to
- a kubelet's host machine. This depends on the Flocker control
- service being running
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
properties:
datasetName:
description: |-
@@ -3363,6 +3364,8 @@ spec:
description: |-
gcePersistentDisk represents a GCE Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
properties:
fsType:
@@ -3398,7 +3401,7 @@ spec:
gitRepo:
description: |-
gitRepo represents a git repository at a particular revision.
- DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
into the Pod's container.
properties:
@@ -3422,6 +3425,7 @@ spec:
glusterfs:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
@@ -3481,7 +3485,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
- Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@@ -3631,8 +3635,9 @@ spec:
- claimName
type: object
photonPersistentDisk:
- description: photonPersistentDisk represents a PhotonController
- persistent disk attached and mounted on kubelets host machine
+ description: |-
+ photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
properties:
fsType:
description: |-
@@ -3648,8 +3653,11 @@ spec:
- pdID
type: object
portworxVolume:
- description: portworxVolume represents a portworx volume attached
- and mounted on kubelets host machine
+ description: |-
+ portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ is on.
properties:
fsType:
description: |-
@@ -4014,8 +4022,9 @@ spec:
x-kubernetes-list-type: atomic
type: object
quobyte:
- description: quobyte represents a Quobyte mount on the host
- that shares a pod's lifetime
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
properties:
group:
description: |-
@@ -4054,6 +4063,7 @@ spec:
rbd:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
@@ -4126,8 +4136,9 @@ spec:
- monitors
type: object
scaleIO:
- description: scaleIO represents a ScaleIO persistent volume
- attached and mounted on Kubernetes nodes.
+ description: |-
+ scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
properties:
fsType:
default: xfs
@@ -4259,8 +4270,9 @@ spec:
type: string
type: object
storageos:
- description: storageOS represents a StorageOS volume attached
- and mounted on Kubernetes nodes.
+ description: |-
+ storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
properties:
fsType:
description: |-
@@ -4305,8 +4317,10 @@ spec:
type: string
type: object
vsphereVolume:
- description: vsphereVolume represents a vSphere volume attached
- and mounted on kubelets host machine
+ description: |-
+ vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ are redirected to the csi.vsphere.vmware.com CSI driver.
properties:
fsType:
description: |-
@@ -4389,7 +4403,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now,
- can accomodate itsi etc.. later'
+ can accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -4440,7 +4454,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -4630,7 +4644,7 @@ spec:
type: boolean
type: object
message:
- description: Auxillary message describing CR status
+ description: Auxiliary message describing CR status
type: string
phase:
description: current phase of the cluster manager
diff --git a/config/crd/bases/enterprise.splunk.com_clustermasters.yaml b/config/crd/bases/enterprise.splunk.com_clustermasters.yaml
index bfd9e330d..3daa773d4 100644
--- a/config/crd/bases/enterprise.splunk.com_clustermasters.yaml
+++ b/config/crd/bases/enterprise.splunk.com_clustermasters.yaml
@@ -350,7 +350,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -365,7 +364,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -531,7 +529,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -546,7 +543,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -709,7 +705,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -724,7 +719,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -890,7 +884,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -905,7 +898,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1033,7 +1025,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -1082,7 +1074,7 @@ spec:
type: string
type: object
type:
- description: 'Type: enterpriseSecurity for now, can accomodate
+ description: 'Type: enterpriseSecurity for now, can accommodate
itsi etc.. later'
type: string
type: object
@@ -1386,10 +1378,11 @@ spec:
environment variables)
type: string
imagePullPolicy:
- description: 'Sets pull policy for all images (either “Always” or
- the default: “IfNotPresent”)'
+ description: 'Sets pull policy for all images ("Always", "Never",
+ or the default: "IfNotPresent")'
enum:
- Always
+ - Never
- IfNotPresent
type: string
imagePullSecrets:
@@ -2022,13 +2015,12 @@ spec:
type: object
trafficDistribution:
description: |-
- TrafficDistribution offers a way to express preferences for how traffic is
- distributed to Service endpoints. Implementations can use this field as a
- hint, but are not required to guarantee strict adherence. If the field is
- not set, the implementation will apply its default routing strategy. If set
- to "PreferClose", implementations should prioritize endpoints that are
- topologically close (e.g., same zone).
- This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ TrafficDistribution offers a way to express preferences for how traffic
+ is distributed to Service endpoints. Implementations can use this field
+ as a hint, but are not required to guarantee strict adherence. If the
+ field is not set, the implementation will apply its default routing
+ strategy. If set to "PreferClose", implementations should prioritize
+ endpoints that are in the same zone.
type: string
type:
description: |-
@@ -2156,6 +2148,8 @@ spec:
Ports is a list of records of service ports
If used, every port defined in the service should have an entry in it
items:
+ description: PortStatus represents the error condition
+ of a service port
properties:
error:
description: |-
@@ -2496,7 +2490,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -2507,7 +2500,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -2577,6 +2569,8 @@ spec:
description: |-
awsElasticBlockStore represents an AWS Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
properties:
fsType:
@@ -2608,8 +2602,10 @@ spec:
- volumeID
type: object
azureDisk:
- description: azureDisk represents an Azure Data Disk mount on
- the host and bind mount to the pod.
+ description: |-
+ azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ are redirected to the disk.csi.azure.com CSI driver.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode: None,
@@ -2647,8 +2643,10 @@ spec:
- diskURI
type: object
azureFile:
- description: azureFile represents an Azure File Service mount
- on the host and bind mount to the pod.
+ description: |-
+ azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ are redirected to the file.csi.azure.com CSI driver.
properties:
readOnly:
description: |-
@@ -2667,8 +2665,9 @@ spec:
- shareName
type: object
cephfs:
- description: cephFS represents a Ceph FS mount on the host that
- shares a pod's lifetime
+ description: |-
+ cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
properties:
monitors:
description: |-
@@ -2720,6 +2719,8 @@ spec:
cinder:
description: |-
cinder represents a cinder volume attached and mounted on kubelets host machine.
+ Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ are redirected to the cinder.csi.openstack.org CSI driver.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
properties:
fsType:
@@ -2829,8 +2830,7 @@ spec:
x-kubernetes-map-type: atomic
csi:
description: csi (Container Storage Interface) represents ephemeral
- storage that is handled by certain external CSI drivers (Beta
- feature).
+ storage that is handled by certain external CSI drivers.
properties:
driver:
description: |-
@@ -3296,6 +3296,7 @@ spec:
description: |-
flexVolume represents a generic volume resource that is
provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
properties:
driver:
description: driver is the name of the driver to use for
@@ -3341,9 +3342,9 @@ spec:
- driver
type: object
flocker:
- description: flocker represents a Flocker volume attached to
- a kubelet's host machine. This depends on the Flocker control
- service being running
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
properties:
datasetName:
description: |-
@@ -3359,6 +3360,8 @@ spec:
description: |-
gcePersistentDisk represents a GCE Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
properties:
fsType:
@@ -3394,7 +3397,7 @@ spec:
gitRepo:
description: |-
gitRepo represents a git repository at a particular revision.
- DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
into the Pod's container.
properties:
@@ -3418,6 +3421,7 @@ spec:
glusterfs:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
@@ -3477,7 +3481,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
- Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@@ -3627,8 +3631,9 @@ spec:
- claimName
type: object
photonPersistentDisk:
- description: photonPersistentDisk represents a PhotonController
- persistent disk attached and mounted on kubelets host machine
+ description: |-
+ photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
properties:
fsType:
description: |-
@@ -3644,8 +3649,11 @@ spec:
- pdID
type: object
portworxVolume:
- description: portworxVolume represents a portworx volume attached
- and mounted on kubelets host machine
+ description: |-
+ portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ is on.
properties:
fsType:
description: |-
@@ -4010,8 +4018,9 @@ spec:
x-kubernetes-list-type: atomic
type: object
quobyte:
- description: quobyte represents a Quobyte mount on the host
- that shares a pod's lifetime
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
properties:
group:
description: |-
@@ -4050,6 +4059,7 @@ spec:
rbd:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
@@ -4122,8 +4132,9 @@ spec:
- monitors
type: object
scaleIO:
- description: scaleIO represents a ScaleIO persistent volume
- attached and mounted on Kubernetes nodes.
+ description: |-
+ scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
properties:
fsType:
default: xfs
@@ -4255,8 +4266,9 @@ spec:
type: string
type: object
storageos:
- description: storageOS represents a StorageOS volume attached
- and mounted on Kubernetes nodes.
+ description: |-
+ storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
properties:
fsType:
description: |-
@@ -4301,8 +4313,10 @@ spec:
type: string
type: object
vsphereVolume:
- description: vsphereVolume represents a vSphere volume attached
- and mounted on kubelets host machine
+ description: |-
+ vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ are redirected to the csi.vsphere.vmware.com CSI driver.
properties:
fsType:
description: |-
@@ -4385,7 +4399,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now,
- can accomodate itsi etc.. later'
+ can accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -4436,7 +4450,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml
index 5e30a273f..dec160160 100644
--- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml
+++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml
@@ -357,7 +357,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -372,7 +371,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -538,7 +536,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -553,7 +550,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -716,7 +712,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -731,7 +726,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -897,7 +891,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -912,7 +905,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1238,10 +1230,11 @@ spec:
environment variables)
type: string
imagePullPolicy:
- description: 'Sets pull policy for all images (either “Always” or
- the default: “IfNotPresent”)'
+ description: 'Sets pull policy for all images ("Always", "Never",
+ or the default: "IfNotPresent")'
enum:
- Always
+ - Never
- IfNotPresent
type: string
imagePullSecrets:
@@ -1879,13 +1872,12 @@ spec:
type: object
trafficDistribution:
description: |-
- TrafficDistribution offers a way to express preferences for how traffic is
- distributed to Service endpoints. Implementations can use this field as a
- hint, but are not required to guarantee strict adherence. If the field is
- not set, the implementation will apply its default routing strategy. If set
- to "PreferClose", implementations should prioritize endpoints that are
- topologically close (e.g., same zone).
- This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ TrafficDistribution offers a way to express preferences for how traffic
+ is distributed to Service endpoints. Implementations can use this field
+ as a hint, but are not required to guarantee strict adherence. If the
+ field is not set, the implementation will apply its default routing
+ strategy. If set to "PreferClose", implementations should prioritize
+ endpoints that are in the same zone.
type: string
type:
description: |-
@@ -2013,6 +2005,8 @@ spec:
Ports is a list of records of service ports
If used, every port defined in the service should have an entry in it
items:
+ description: PortStatus represents the error condition
+ of a service port
properties:
error:
description: |-
@@ -2236,7 +2230,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -2247,7 +2240,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -2317,6 +2309,8 @@ spec:
description: |-
awsElasticBlockStore represents an AWS Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
properties:
fsType:
@@ -2348,8 +2342,10 @@ spec:
- volumeID
type: object
azureDisk:
- description: azureDisk represents an Azure Data Disk mount on
- the host and bind mount to the pod.
+ description: |-
+ azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ are redirected to the disk.csi.azure.com CSI driver.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode: None,
@@ -2387,8 +2383,10 @@ spec:
- diskURI
type: object
azureFile:
- description: azureFile represents an Azure File Service mount
- on the host and bind mount to the pod.
+ description: |-
+ azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ are redirected to the file.csi.azure.com CSI driver.
properties:
readOnly:
description: |-
@@ -2407,8 +2405,9 @@ spec:
- shareName
type: object
cephfs:
- description: cephFS represents a Ceph FS mount on the host that
- shares a pod's lifetime
+ description: |-
+ cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
properties:
monitors:
description: |-
@@ -2460,6 +2459,8 @@ spec:
cinder:
description: |-
cinder represents a cinder volume attached and mounted on kubelets host machine.
+ Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ are redirected to the cinder.csi.openstack.org CSI driver.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
properties:
fsType:
@@ -2569,8 +2570,7 @@ spec:
x-kubernetes-map-type: atomic
csi:
description: csi (Container Storage Interface) represents ephemeral
- storage that is handled by certain external CSI drivers (Beta
- feature).
+ storage that is handled by certain external CSI drivers.
properties:
driver:
description: |-
@@ -3036,6 +3036,7 @@ spec:
description: |-
flexVolume represents a generic volume resource that is
provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
properties:
driver:
description: driver is the name of the driver to use for
@@ -3081,9 +3082,9 @@ spec:
- driver
type: object
flocker:
- description: flocker represents a Flocker volume attached to
- a kubelet's host machine. This depends on the Flocker control
- service being running
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
properties:
datasetName:
description: |-
@@ -3099,6 +3100,8 @@ spec:
description: |-
gcePersistentDisk represents a GCE Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
properties:
fsType:
@@ -3134,7 +3137,7 @@ spec:
gitRepo:
description: |-
gitRepo represents a git repository at a particular revision.
- DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
into the Pod's container.
properties:
@@ -3158,6 +3161,7 @@ spec:
glusterfs:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
@@ -3217,7 +3221,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
- Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@@ -3367,8 +3371,9 @@ spec:
- claimName
type: object
photonPersistentDisk:
- description: photonPersistentDisk represents a PhotonController
- persistent disk attached and mounted on kubelets host machine
+ description: |-
+ photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
properties:
fsType:
description: |-
@@ -3384,8 +3389,11 @@ spec:
- pdID
type: object
portworxVolume:
- description: portworxVolume represents a portworx volume attached
- and mounted on kubelets host machine
+ description: |-
+ portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ is on.
properties:
fsType:
description: |-
@@ -3750,8 +3758,9 @@ spec:
x-kubernetes-list-type: atomic
type: object
quobyte:
- description: quobyte represents a Quobyte mount on the host
- that shares a pod's lifetime
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
properties:
group:
description: |-
@@ -3790,6 +3799,7 @@ spec:
rbd:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
@@ -3862,8 +3872,9 @@ spec:
- monitors
type: object
scaleIO:
- description: scaleIO represents a ScaleIO persistent volume
- attached and mounted on Kubernetes nodes.
+ description: |-
+ scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
properties:
fsType:
default: xfs
@@ -3995,8 +4006,9 @@ spec:
type: string
type: object
storageos:
- description: storageOS represents a StorageOS volume attached
- and mounted on Kubernetes nodes.
+ description: |-
+ storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
properties:
fsType:
description: |-
@@ -4041,8 +4053,10 @@ spec:
type: string
type: object
vsphereVolume:
- description: vsphereVolume represents a vSphere volume attached
- and mounted on kubelets host machine
+ description: |-
+ vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ are redirected to the csi.vsphere.vmware.com CSI driver.
properties:
fsType:
description: |-
@@ -4210,7 +4224,7 @@ spec:
jsonPath: .metadata.creationTimestamp
name: Age
type: date
- - description: Auxillary message describing CR status
+ - description: Auxiliary message describing CR status
jsonPath: .status.message
name: Message
type: string
@@ -4529,7 +4543,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -4544,7 +4557,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -4710,7 +4722,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -4725,7 +4736,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -4888,7 +4898,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -4903,7 +4912,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5069,7 +5077,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5084,7 +5091,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5410,10 +5416,11 @@ spec:
environment variables)
type: string
imagePullPolicy:
- description: 'Sets pull policy for all images (either “Always” or
- the default: “IfNotPresent”)'
+ description: 'Sets pull policy for all images ("Always", "Never",
+ or the default: "IfNotPresent")'
enum:
- Always
+ - Never
- IfNotPresent
type: string
imagePullSecrets:
@@ -5604,6 +5611,92 @@ spec:
type: string
type: object
x-kubernetes-map-type: atomic
+ objectStorageRef:
+ description: Object Storage reference
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ queueRef:
+ description: Queue reference
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
readinessInitialDelaySeconds:
description: |-
ReadinessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) for Readiness probe
@@ -6051,13 +6144,12 @@ spec:
type: object
trafficDistribution:
description: |-
- TrafficDistribution offers a way to express preferences for how traffic is
- distributed to Service endpoints. Implementations can use this field as a
- hint, but are not required to guarantee strict adherence. If the field is
- not set, the implementation will apply its default routing strategy. If set
- to "PreferClose", implementations should prioritize endpoints that are
- topologically close (e.g., same zone).
- This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ TrafficDistribution offers a way to express preferences for how traffic
+ is distributed to Service endpoints. Implementations can use this field
+ as a hint, but are not required to guarantee strict adherence. If the
+ field is not set, the implementation will apply its default routing
+ strategy. If set to "PreferClose", implementations should prioritize
+ endpoints that are in the same zone.
type: string
type:
description: |-
@@ -6185,6 +6277,8 @@ spec:
Ports is a list of records of service ports
If used, every port defined in the service should have an entry in it
items:
+ description: PortStatus represents the error condition
+ of a service port
properties:
error:
description: |-
@@ -6408,7 +6502,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -6419,7 +6512,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -6489,6 +6581,8 @@ spec:
description: |-
awsElasticBlockStore represents an AWS Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
properties:
fsType:
@@ -6520,8 +6614,10 @@ spec:
- volumeID
type: object
azureDisk:
- description: azureDisk represents an Azure Data Disk mount on
- the host and bind mount to the pod.
+ description: |-
+ azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ are redirected to the disk.csi.azure.com CSI driver.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode: None,
@@ -6559,8 +6655,10 @@ spec:
- diskURI
type: object
azureFile:
- description: azureFile represents an Azure File Service mount
- on the host and bind mount to the pod.
+ description: |-
+ azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ are redirected to the file.csi.azure.com CSI driver.
properties:
readOnly:
description: |-
@@ -6579,8 +6677,9 @@ spec:
- shareName
type: object
cephfs:
- description: cephFS represents a Ceph FS mount on the host that
- shares a pod's lifetime
+ description: |-
+ cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
properties:
monitors:
description: |-
@@ -6632,6 +6731,8 @@ spec:
cinder:
description: |-
cinder represents a cinder volume attached and mounted on kubelets host machine.
+ Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ are redirected to the cinder.csi.openstack.org CSI driver.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
properties:
fsType:
@@ -6741,8 +6842,7 @@ spec:
x-kubernetes-map-type: atomic
csi:
description: csi (Container Storage Interface) represents ephemeral
- storage that is handled by certain external CSI drivers (Beta
- feature).
+ storage that is handled by certain external CSI drivers.
properties:
driver:
description: |-
@@ -7208,6 +7308,7 @@ spec:
description: |-
flexVolume represents a generic volume resource that is
provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
properties:
driver:
description: driver is the name of the driver to use for
@@ -7253,9 +7354,9 @@ spec:
- driver
type: object
flocker:
- description: flocker represents a Flocker volume attached to
- a kubelet's host machine. This depends on the Flocker control
- service being running
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
properties:
datasetName:
description: |-
@@ -7271,6 +7372,8 @@ spec:
description: |-
gcePersistentDisk represents a GCE Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
properties:
fsType:
@@ -7306,7 +7409,7 @@ spec:
gitRepo:
description: |-
gitRepo represents a git repository at a particular revision.
- DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
into the Pod's container.
properties:
@@ -7330,6 +7433,7 @@ spec:
glusterfs:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
@@ -7389,7 +7493,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
- Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@@ -7539,8 +7643,9 @@ spec:
- claimName
type: object
photonPersistentDisk:
- description: photonPersistentDisk represents a PhotonController
- persistent disk attached and mounted on kubelets host machine
+ description: |-
+ photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
properties:
fsType:
description: |-
@@ -7556,8 +7661,11 @@ spec:
- pdID
type: object
portworxVolume:
- description: portworxVolume represents a portworx volume attached
- and mounted on kubelets host machine
+ description: |-
+ portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ is on.
properties:
fsType:
description: |-
@@ -7922,8 +8030,9 @@ spec:
x-kubernetes-list-type: atomic
type: object
quobyte:
- description: quobyte represents a Quobyte mount on the host
- that shares a pod's lifetime
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
properties:
group:
description: |-
@@ -7962,6 +8071,7 @@ spec:
rbd:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
@@ -8034,8 +8144,9 @@ spec:
- monitors
type: object
scaleIO:
- description: scaleIO represents a ScaleIO persistent volume
- attached and mounted on Kubernetes nodes.
+ description: |-
+ scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
properties:
fsType:
default: xfs
@@ -8167,8 +8278,9 @@ spec:
type: string
type: object
storageos:
- description: storageOS represents a StorageOS volume attached
- and mounted on Kubernetes nodes.
+ description: |-
+ storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
properties:
fsType:
description: |-
@@ -8213,8 +8325,10 @@ spec:
type: string
type: object
vsphereVolume:
- description: vsphereVolume represents a vSphere volume attached
- and mounted on kubelets host machine
+ description: |-
+ vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ are redirected to the csi.vsphere.vmware.com CSI driver.
properties:
fsType:
description: |-
@@ -8242,6 +8356,13 @@ spec:
type: object
type: array
type: object
+ x-kubernetes-validations:
+ - message: queueRef and objectStorageRef must both be set or both be empty
+ rule: has(self.queueRef) == has(self.objectStorageRef)
+ - message: queueRef is immutable once created
+ rule: '!has(oldSelf.queueRef) || self.queueRef == oldSelf.queueRef'
+ - message: objectStorageRef is immutable once created
+ rule: '!has(oldSelf.objectStorageRef) || self.objectStorageRef == oldSelf.objectStorageRef'
status:
description: IndexerClusterStatus defines the observed state of a Splunk
Enterprise indexer cluster
@@ -8273,6 +8394,11 @@ spec:
- Terminating
- Error
type: string
+ credentialSecretVersion:
+ description: Credential secret version to track changes to the secret
+ and trigger rolling restart of indexer cluster peers when the secret
+ is updated
+ type: string
indexer_secret_changed_flag:
description: Indicates when the idxc_secret has been changed for a
peer
@@ -8289,7 +8415,7 @@ spec:
description: Indicates if the cluster is in maintenance mode.
type: boolean
message:
- description: Auxillary message describing CR status
+ description: Auxiliary message describing CR status
type: string
namespace_scoped_secret_resource_version:
description: Indicates resource version of namespace scoped secret
@@ -8350,6 +8476,11 @@ spec:
description: Indicates whether the manager is ready to begin servicing,
based on whether it is initialized.
type: boolean
+ serviceAccount:
+ description: Service account to track changes to the service account
+ and trigger rolling restart of indexer cluster peers when the service
+ account is updated
+ type: string
type: object
type: object
served: true
diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml
new file mode 100644
index 000000000..380109fb8
--- /dev/null
+++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml
@@ -0,0 +1,4667 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.18.0
+ name: ingestorclusters.enterprise.splunk.com
+spec:
+ group: enterprise.splunk.com
+ names:
+ kind: IngestorCluster
+ listKind: IngestorClusterList
+ plural: ingestorclusters
+ shortNames:
+ - ing
+ singular: ingestorcluster
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Status of ingestor cluster pods
+ jsonPath: .status.phase
+ name: Phase
+ type: string
+ - description: Number of desired ingestor cluster pods
+ jsonPath: .status.replicas
+ name: Desired
+ type: integer
+ - description: Current number of ready ingestor cluster pods
+ jsonPath: .status.readyReplicas
+ name: Ready
+ type: integer
+ - description: Age of ingestor cluster resource
+ jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - description: Auxiliary message describing CR status
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v4
+ schema:
+ openAPIV3Schema:
+ description: IngestorCluster is the Schema for the ingestorclusters API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: IngestorClusterSpec defines the spec of Ingestor Cluster
+ properties:
+ Mock:
+ description: Mock to differentiate between UTs and actual reconcile
+ type: boolean
+ affinity:
+ description: Kubernetes Affinity rules that control how pods are assigned
+ to particular nodes.
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for the
+ pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the
+ corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching the corresponding
+ nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms.
+ The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g. co-locate
+ this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules (e.g.
+ avoid putting this pod in the same node, zone, etc. as some
+ other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ type: object
+ appRepo:
+ description: Splunk Enterprise app repository that specifies remote
+ app location and scope for Splunk app management
+ properties:
+ appInstallPeriodSeconds:
+ default: 90
+ description: |-
+ App installation period within a reconcile. Apps will be installed during this period before the next reconcile is attempted.
+ Note: Do not change this setting unless instructed to do so by Splunk Support
+ format: int64
+ minimum: 30
+ type: integer
+ appSources:
+ description: List of App sources on remote storage
+ items:
+ description: AppSourceSpec defines list of App package (*.spl,
+ *.tgz) locations on remote volumes
+ properties:
+ location:
+ description: Location relative to the volume path
+ type: string
+ name:
+ description: Logical name for the set of apps placed in
+ this location. Logical name must be unique to the appRepo
+ type: string
+ premiumAppsProps:
+ description: Properties for premium apps, fill in when scope
+ premiumApps is chosen
+ properties:
+ esDefaults:
+ description: Enterprise Security App defaults
+ properties:
+ sslEnablement:
+ description: "Sets the sslEnablement value for ES
+ app installation\n strict: Ensure that SSL
+ is enabled\n in the web.conf configuration
+ file to use\n this mode. Otherwise,
+ the installer exits\n\t \t with an error.
+ This is the DEFAULT mode used\n by
+ the operator if left empty.\n auto: Enables
+ SSL in the etc/system/local/web.conf\n configuration
+ file.\n ignore: Ignores whether SSL is enabled
+ or disabled."
+ type: string
+ type: object
+ type:
+ description: 'Type: enterpriseSecurity for now, can
+ accommodate itsi etc.. later'
+ type: string
+ type: object
+ scope:
+ description: 'Scope of the App deployment: cluster, clusterWithPreConfig,
+ local, premiumApps. Scope determines whether the App(s)
+ is/are installed locally, cluster-wide or it is a premium
+ app'
+ type: string
+ volumeName:
+ description: Remote Storage Volume name
+ type: string
+ type: object
+ type: array
+ appsRepoPollIntervalSeconds:
+ description: |-
+ Interval in seconds to check the Remote Storage for App changes.
+ The default value for this config is 1 hour(3600 sec),
+ minimum value is 1 minute(60sec) and maximum value is 1 day(86400 sec).
+ We assign the value based on following conditions -
+ 1. If no value or 0 is specified then it means periodic polling is disabled.
+ 2. If anything less than min is specified then we set it to 1 min.
+ 3. If anything more than the max value is specified then we set it to 1 day.
+ format: int64
+ type: integer
+ defaults:
+ description: Defines the default configuration settings for App
+ sources
+ properties:
+ premiumAppsProps:
+ description: Properties for premium apps, fill in when scope
+ premiumApps is chosen
+ properties:
+ esDefaults:
+ description: Enterprise Security App defaults
+ properties:
+ sslEnablement:
+ description: "Sets the sslEnablement value for ES
+ app installation\n strict: Ensure that SSL is
+ enabled\n in the web.conf configuration
+ file to use\n this mode. Otherwise, the
+ installer exits\n\t \t with an error. This
+ is the DEFAULT mode used\n by the operator
+ if left empty.\n auto: Enables SSL in the etc/system/local/web.conf\n
+ \ configuration file.\n ignore: Ignores
+ whether SSL is enabled or disabled."
+ type: string
+ type: object
+ type:
+ description: 'Type: enterpriseSecurity for now, can accommodate
+ itsi etc.. later'
+ type: string
+ type: object
+ scope:
+ description: 'Scope of the App deployment: cluster, clusterWithPreConfig,
+ local, premiumApps. Scope determines whether the App(s)
+ is/are installed locally, cluster-wide or it is a premium
+ app'
+ type: string
+ volumeName:
+ description: Remote Storage Volume name
+ type: string
+ type: object
+ installMaxRetries:
+ default: 2
+ description: Maximum number of retries to install Apps
+ format: int32
+ minimum: 0
+ type: integer
+ maxConcurrentAppDownloads:
+ description: Maximum number of apps that can be downloaded at
+ same time
+ format: int64
+ type: integer
+ volumes:
+ description: List of remote storage volumes
+ items:
+ description: VolumeSpec defines remote volume config
+ properties:
+ endpoint:
+ description: Remote volume URI
+ type: string
+ name:
+ description: Remote volume name
+ type: string
+ path:
+ description: Remote volume path
+ type: string
+ provider:
+ description: 'App Package Remote Store provider. Supported
+ values: aws, minio, azure, gcp.'
+ type: string
+ region:
+ description: Region of the remote storage volume where apps
+ reside. Used for aws, if provided. Not used for minio
+ and azure.
+ type: string
+ secretRef:
+ description: Secret object name
+ type: string
+ storageType:
+ description: 'Remote Storage type. Supported values: s3,
+ blob, gcs. s3 works with aws or minio providers, whereas
+ blob works with azure provider, gcs works for gcp.'
+ type: string
+ type: object
+ type: array
+ type: object
+ clusterManagerRef:
+ description: ClusterManagerRef refers to a Splunk Enterprise indexer
+ cluster managed by the operator within Kubernetes
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ clusterMasterRef:
+ description: ClusterMasterRef refers to a Splunk Enterprise indexer
+ cluster managed by the operator within Kubernetes
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ defaults:
+ description: Inline map of default.yml overrides used to initialize
+ the environment
+ type: string
+ defaultsUrl:
+ description: Full path or URL for one or more default.yml files, separated
+ by commas
+ type: string
+ defaultsUrlApps:
+ description: |-
+ Full path or URL for one or more defaults.yml files specific
+ to App install, separated by commas. The defaults listed here
+ will be installed on the CM, standalone, search head deployer
+ or license manager instance.
+ type: string
+ etcVolumeStorageConfig:
+ description: Storage configuration for /opt/splunk/etc volume
+ properties:
+ ephemeralStorage:
+ description: |-
+ If true, ephemeral (emptyDir) storage will be used
+ default false
+ type: boolean
+ storageCapacity:
+ description: Storage capacity to request persistent volume claims
+                  (default="10Gi" for etc and "100Gi" for var)
+ type: string
+ storageClassName:
+ description: Name of StorageClass to use for persistent volume
+ claims
+ type: string
+ type: object
+ extraEnv:
+ description: |-
+ ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers
+ WARNING: Setting environment variables used by Splunk or Ansible will affect Splunk installation and operation
+ items:
+ description: EnvVar represents an environment variable present in
+ a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value. Cannot
+ be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is
+ written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the exposed
+ resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE
+ environment variables)
+ type: string
+ imagePullPolicy:
+ description: 'Sets pull policy for all images ("Always", "Never",
+ or the default: "IfNotPresent")'
+ enum:
+ - Always
+ - Never
+ - IfNotPresent
+ type: string
+ imagePullSecrets:
+ description: |-
+ Sets imagePullSecrets if image is being pulled from a private registry.
+ See https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ licenseManagerRef:
+ description: LicenseManagerRef refers to a Splunk Enterprise license
+ manager managed by the operator within Kubernetes
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ licenseMasterRef:
+ description: LicenseMasterRef refers to a Splunk Enterprise license
+ manager managed by the operator within Kubernetes
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ licenseUrl:
+ description: Full path or URL for a Splunk Enterprise license file
+ type: string
+ livenessInitialDelaySeconds:
+ description: |-
+ LivenessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) for the Liveness probe
+ Note: If needed, Operator overrides with a higher value
+ format: int32
+ minimum: 0
+ type: integer
+ livenessProbe:
+ description: LivenessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command
+ properties:
+ failureThreshold:
+ description: Minimum consecutive failures for the probe to be
+ considered failed after having succeeded.
+ format: int32
+ type: integer
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the probe.
+ format: int32
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ monitoringConsoleRef:
+ description: MonitoringConsoleRef refers to a Splunk Enterprise monitoring
+ console managed by the operator within Kubernetes
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ objectStorageRef:
+ description: Object Storage reference
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ queueRef:
+ description: Queue reference
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readinessInitialDelaySeconds:
+ description: |-
+ ReadinessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) for Readiness probe
+ Note: If needed, Operator overrides with a higher value
+ format: int32
+ minimum: 0
+ type: integer
+ readinessProbe:
+ description: ReadinessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes
+ properties:
+ failureThreshold:
+ description: Minimum consecutive failures for the probe to be
+ considered failed after having succeeded.
+ format: int32
+ type: integer
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the probe.
+ format: int32
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ replicas:
+ default: 1
+ description: Number of ingestor pods
+ format: int32
+ minimum: 1
+ type: integer
+ resources:
+ description: resource requirements for the pod containers
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ schedulerName:
+ description: Name of Scheduler to use for pod placement (defaults
+              to "default-scheduler")
+ type: string
+ serviceAccount:
+ description: |-
+ ServiceAccount is the service account used by the pods deployed by the CRD.
+ If not specified uses the default serviceAccount for the namespace as per
+ https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server
+ type: string
+ serviceTemplate:
+ description: ServiceTemplate is a template used to create Kubernetes
+ services
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ type: object
+ spec:
+ description: |-
+ Spec defines the behavior of a service.
+ https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true". It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as describe above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly. If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as describe above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP. A common example is external load-balancers
+ that are not part of the Kubernetes system.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalName:
+ description: |-
+ externalName is the external reference that discovery mechanisms will
+ return as an alias for this service (e.g. a DNS CNAME record). No
+ proxying will be involved. Must be a lowercase RFC-1123 hostname
+ (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+ type: string
+ externalTrafficPolicy:
+ description: |-
+ externalTrafficPolicy describes how nodes distribute service traffic they
+ receive on one of the Service's "externally-facing" addresses (NodePorts,
+ ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+ the service in a way that assumes that external load balancers will take care
+ of balancing the service traffic between nodes, and so each node will deliver
+ traffic only to the node-local endpoints of the service, without masquerading
+ the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+ be dropped.) The default value, "Cluster", uses the standard behavior of
+ routing to all endpoints evenly (possibly modified by topology and other
+ features). Note that traffic sent to an External IP or LoadBalancer IP from
+ within the cluster will always get "Cluster" semantics, but clients sending to
+ a NodePort from within the cluster may need to take traffic policy into account
+ when picking a node.
+ type: string
+ healthCheckNodePort:
+ description: |-
+ healthCheckNodePort specifies the healthcheck nodePort for the service.
+ This only applies when type is set to LoadBalancer and
+ externalTrafficPolicy is set to Local. If a value is specified, is
+ in-range, and is not in use, it will be used. If not specified, a value
+ will be automatically allocated. External systems (e.g. load-balancers)
+ can use this port to determine if a given node holds endpoints for this
+ service or not. If this field is specified when creating a Service
+ which does not need it, creation will fail. This field will be wiped
+ when updating a Service to no longer need it (e.g. changing type).
+ This field cannot be updated once set.
+ format: int32
+ type: integer
+ internalTrafficPolicy:
+ description: |-
+ InternalTrafficPolicy describes how nodes distribute service traffic they
+ receive on the ClusterIP. If set to "Local", the proxy will assume that pods
+ only want to talk to endpoints of the service on the same node as the pod,
+ dropping the traffic if there are no local endpoints. The default value,
+ "Cluster", uses the standard behavior of routing to all endpoints evenly
+ (possibly modified by topology and other features).
+ type: string
+ ipFamilies:
+ description: |-
+ IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
+ service. This field is usually assigned automatically based on cluster
+ configuration and the ipFamilyPolicy field. If this field is specified
+ manually, the requested family is available in the cluster,
+ and ipFamilyPolicy allows it, it will be used; otherwise creation of
+ the service will fail. This field is conditionally mutable: it allows
+ for adding or removing a secondary IP family, but it does not allow
+ changing the primary IP family of the Service. Valid values are "IPv4"
+ and "IPv6". This field only applies to Services of types ClusterIP,
+ NodePort, and LoadBalancer, and does apply to "headless" services.
+ This field will be wiped when updating a Service to type ExternalName.
+
+ This field may hold a maximum of two entries (dual-stack families, in
+ either order). These families must correspond to the values of the
+ clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+ governed by the ipFamilyPolicy field.
+ items:
+ description: |-
+ IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+ to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ipFamilyPolicy:
+ description: |-
+ IPFamilyPolicy represents the dual-stack-ness requested or required by
+ this Service. If there is no value provided, then this field will be set
+ to SingleStack. Services can be "SingleStack" (a single IP family),
+ "PreferDualStack" (two IP families on dual-stack configured clusters or
+ a single IP family on single-stack clusters), or "RequireDualStack"
+ (two IP families on dual-stack configured clusters, otherwise fail). The
+ ipFamilies and clusterIPs fields depend on the value of this field. This
+ field will be wiped when updating a service to type ExternalName.
+ type: string
+ loadBalancerClass:
+ description: |-
+ loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+ If specified, the value of this field must be a label-style identifier, with an optional prefix,
+ e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+ This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+ balancer implementation is used, today this is typically done through the cloud provider integration,
+ but should apply for any default implementation. If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, this will restrict traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ cloud-provider does not support the feature."
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information on service's
+ port.
+ properties:
+ appProtocol:
+ description: |-
+ The application protocol for this port.
+ This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ This field follows standard Kubernetes label syntax.
+ Valid values are either:
+
+ * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ RFC-6335 and https://www.iana.org/assignments/service-names).
+
+ * Kubernetes-defined prefixed names:
+ * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+ * Other protocols should use implementation-defined prefixed names such as
+ mycompany.com/my-custom-protocol.
+ type: string
+ name:
+ description: |-
+ The name of this port within the service. This must be a DNS_LABEL.
+ All ports within a ServiceSpec must have unique names. When considering
+ the endpoints for a Service, this must match the 'name' field in the
+ EndpointPort.
+ Optional if only one ServicePort is defined on this service.
+ type: string
+ nodePort:
+ description: |-
+ The port on each node on which this service is exposed when type is
+ NodePort or LoadBalancer. Usually assigned by the system. If a value is
+ specified, in-range, and not in use it will be used, otherwise the
+ operation will fail. If not specified, a port will be allocated if this
+ Service requires one. If this field is specified when creating a
+ Service which does not need it, creation will fail. This field will be
+ wiped when updating a Service to no longer need it (e.g. changing type
+ from NodePort to ClusterIP).
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ format: int32
+ type: integer
+ port:
+ description: The port that will be exposed by this service.
+ format: int32
+ type: integer
+ protocol:
+ default: TCP
+ description: |-
+ The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+ Default is TCP.
+ type: string
+ targetPort:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the pods targeted by the service.
+ Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ If this is a string, it will be looked up as a named port in the
+ target Pod's container ports. If this is not specified, the value
+ of the 'port' field is used (an identity map).
+ This field is ignored for services with clusterIP=None, and should be
+ omitted or set equal to the 'port' field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - port
+ - protocol
+ x-kubernetes-list-type: map
+ publishNotReadyAddresses:
+ description: |-
+ publishNotReadyAddresses indicates that any agent which deals with endpoints for this
+ Service should disregard any indications of ready/not-ready.
+ The primary use case for setting this field is for a StatefulSet's Headless Service to
+ propagate SRV DNS records for its Pods for the purpose of peer discovery.
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for
+ Services interpret this to mean that all endpoints are considered "ready" even if the
+ Pods themselves are not. Agents which consume only Kubernetes generated endpoints
+ through the Endpoints or EndpointSlice resources can safely assume this behavior.
+ type: boolean
+ selector:
+ additionalProperties:
+ type: string
+ description: |-
+ Route service traffic to pods with label keys and values matching this
+ selector. If empty or not present, the service is assumed to have an
+ external process managing its endpoints, which Kubernetes will not
+ modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+ Ignored if type is ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ type: object
+ x-kubernetes-map-type: atomic
+ sessionAffinity:
+ description: |-
+ Supports "ClientIP" and "None". Used to maintain session affinity.
+ Enable client IP based session affinity.
+ Must be ClientIP or None.
+ Defaults to None.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ sessionAffinityConfig:
+ description: sessionAffinityConfig contains the configurations
+ of session affinity.
+ properties:
+ clientIP:
+ description: clientIP contains the configurations of Client
+ IP based session affinity.
+ properties:
+ timeoutSeconds:
+ description: |-
+ timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+ The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP".
+ Default value is 10800(for 3 hours).
+ format: int32
+ type: integer
+ type: object
+ type: object
+ trafficDistribution:
+ description: |-
+ TrafficDistribution offers a way to express preferences for how traffic
+ is distributed to Service endpoints. Implementations can use this field
+ as a hint, but are not required to guarantee strict adherence. If the
+ field is not set, the implementation will apply its default routing
+ strategy. If set to "PreferClose", implementations should prioritize
+ endpoints that are in the same zone.
+ type: string
+ type:
+ description: |-
+ type determines how the Service is exposed. Defaults to ClusterIP. Valid
+ options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+ "ClusterIP" allocates a cluster-internal IP address for load-balancing
+ to endpoints. Endpoints are determined by the selector or if that is not
+ specified, by manual construction of an Endpoints object or
+ EndpointSlice objects. If clusterIP is "None", no virtual IP is
+ allocated and the endpoints are published as a set of endpoints rather
+ than a virtual IP.
+ "NodePort" builds on ClusterIP and allocates a port on every node which
+ routes to the same endpoints as the clusterIP.
+ "LoadBalancer" builds on NodePort and creates an external load-balancer
+ (if supported in the current cloud) which routes to the same endpoints
+ as the clusterIP.
+ "ExternalName" aliases this service to the specified externalName.
+ Several other fields do not apply to ExternalName services.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ type: string
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the service.
+ Populated by the system.
+ Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ conditions:
+ description: Current service state
+ items:
+ description: Condition contains details for one aspect of
+ the current state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False,
+ Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ loadBalancer:
+ description: |-
+ LoadBalancer contains the current status of the load-balancer,
+ if one is present.
+ properties:
+ ingress:
+ description: |-
+ Ingress is a list containing ingress points for the load-balancer.
+ Traffic intended for the service should be sent to these ingress points.
+ items:
+ description: |-
+ LoadBalancerIngress represents the status of a load-balancer ingress point:
+ traffic intended for the service should be sent to an ingress point.
+ properties:
+ hostname:
+ description: |-
+ Hostname is set for load-balancer ingress points that are DNS based
+ (typically AWS load-balancers)
+ type: string
+ ip:
+ description: |-
+ IP is set for load-balancer ingress points that are IP based
+ (typically GCE or OpenStack load-balancers)
+ type: string
+ ipMode:
+ description: |-
+ IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified.
+ Setting this to "VIP" indicates that traffic is delivered to the node with
+ the destination set to the load-balancer's IP and port.
+ Setting this to "Proxy" indicates that traffic is delivered to the node or pod with
+ the destination set to the node's IP and node port or the pod's IP and port.
+ Service implementations may use this information to adjust traffic routing.
+ type: string
+ ports:
+ description: |-
+ Ports is a list of records of service ports
+ If used, every port defined in the service should have an entry in it
+ items:
+ description: PortStatus represents the error condition
+ of a service port
+ properties:
+ error:
+ description: |-
+ Error is to record the problem with the service port
+ The format of the error shall comply with the following rules:
+ - built-in error values shall be specified in this file and those shall use
+ CamelCase names
+ - cloud provider specific error values must have names that comply with the
+ format foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ port:
+ description: Port is the port number of the
+ service port of which status is recorded
+ here
+ format: int32
+ type: integer
+ protocol:
+ description: |-
+ Protocol is the protocol of the service port of which status is recorded here
+ The supported values are: "TCP", "UDP", "SCTP"
+ type: string
+ required:
+ - error
+ - port
+ - protocol
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ type: object
+ type: object
+ startupProbe:
+ description: StartupProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes
+ properties:
+ failureThreshold:
+ description: Minimum consecutive failures for the probe to be
+ considered failed after having succeeded.
+ format: int32
+ type: integer
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the probe.
+ format: int32
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ tolerations:
+ description: Pod's tolerations for Kubernetes node's taint
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple using the matching operator .
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ description: TopologySpreadConstraint https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching
+ pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select the pods over which
+ spreading will be calculated. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are ANDed with labelSelector
+ to select the group of existing pods over which spreading will be calculated
+ for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ MatchLabelKeys cannot be set when LabelSelector isn't set.
+ Keys that don't exist in the incoming pod labels will
+ be ignored. A null or empty list means only match against labelSelector.
+
+ This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ maxSkew:
+ description: |-
+ MaxSkew describes the degree to which pods may be unevenly distributed.
+ When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+ between the number of matching pods in the target topology and the global minimum.
+ The global minimum is the minimum number of matching pods in an eligible domain
+ or zero if the number of eligible domains is less than MinDomains.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 2/2/1:
+ In this case, the global minimum is 1.
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, new pod with the same labelSelector cannot be scheduled,
+ because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ it will violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each as a "bucket", and try to put balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: |-
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ the spread constraint.
+ - DoNotSchedule (default) tells the scheduler not to schedule it.
+ - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+ but giving higher precedence to topologies that would help reduce the
+ skew.
+ A constraint is considered "Unsatisfiable" for an incoming pod
+ if and only if every possible node assignment for that pod would violate
+ "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 3/1/1:
+ | zone1 | zone2 | zone3 |
+ | P P P | P | P |
+ If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+ to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+ MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+ won't make it *more* imbalanced.
+ It's a required field.
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ varVolumeStorageConfig:
+ description: Storage configuration for /opt/splunk/var volume
+ properties:
+ ephemeralStorage:
+ description: |-
+ If true, ephemeral (emptyDir) storage will be used
+ default false
+ type: boolean
+ storageCapacity:
+ description: Storage capacity to request persistent volume claims
+ (default="10Gi" for etc and "100Gi" for var)
+ type: string
+ storageClassName:
+ description: Name of StorageClass to use for persistent volume
+ claims
+ type: string
+ type: object
+ volumes:
+ description: List of one or more Kubernetes volumes. These will be
+ mounted in all pod containers as /mnt/
+ items:
+ description: Volume represents a named volume in a pod that may
+ be accessed by any container in the pod.
+ properties:
+ awsElasticBlockStore:
+ description: |-
+ awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly value true will force the readOnly setting in VolumeMounts.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: boolean
+ volumeID:
+ description: |-
+ volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ description: |-
+ azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ are redirected to the disk.csi.azure.com CSI driver.
+ properties:
+ cachingMode:
+ description: 'cachingMode is the Host Caching mode: None,
+ Read Only, Read Write.'
+ type: string
+ diskName:
+ description: diskName is the Name of the data disk in the
+ blob storage
+ type: string
+ diskURI:
+ description: diskURI is the URI of data disk in the blob
+ storage
+ type: string
+ fsType:
+ default: ext4
+ description: |-
+ fsType is Filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ kind:
+ description: 'kind expected values are Shared: multiple
+ blob disks per storage account Dedicated: single blob
+ disk per storage account Managed: azure managed data
+ disk (only in managed availability set). defaults to shared'
+ type: string
+ readOnly:
+ default: false
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ description: |-
+ azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ are redirected to the file.csi.azure.com CSI driver.
+ properties:
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretName:
+ description: secretName is the name of secret that contains
+ Azure Storage Account Name and Key
+ type: string
+ shareName:
+ description: shareName is the azure share Name
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ description: |-
+ cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
+ properties:
+ monitors:
+ description: |-
+ monitors is Required: Monitors is a collection of Ceph monitors
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: 'path is Optional: Used as the mounted root,
+ rather than the full Ceph tree, default is /'
+ type: string
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: boolean
+ secretFile:
+ description: |-
+ secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ secretRef:
+ description: |-
+ secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is optional: User is the rados user name, default is admin
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ description: |-
+ cinder represents a cinder volume attached and mounted on kubelets host machine.
+ Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ are redirected to the cinder.csi.openstack.org CSI driver.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is optional: points to a secret object containing parameters used to connect
+ to OpenStack.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeID:
+ description: |-
+ volumeID used to identify the volume in cinder.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ description: configMap represents a configMap that should populate
+ this volume
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap or its
+ keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents ephemeral
+ storage that is handled by certain external CSI drivers.
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about the pod
+ that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: Items is a list of downward API volume file
+ items:
+ description: DownwardAPIVolumeFile represents information
+ to create the file containing the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field of the pod:
+ only annotations, labels, name, namespace and uid
+ are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the relative path
+ name of the file to be created. Must not be absolute
+ or contain the ''..'' path. Must be utf-8 encoded.
+ The first item of the relative path must not start
+ with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ emptyDir:
+ description: |-
+ emptyDir represents a temporary directory that shares a pod's lifetime.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ properties:
+ medium:
+ description: |-
+ medium represents what type of storage medium should back this directory.
+ The default is "" which means to use the node's default medium.
+ Must be an empty string (default) or Memory.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ sizeLimit is the total amount of local storage required for this EmptyDir volume.
+ The size limit is also applicable for memory medium.
+ The maximum usage on memory medium EmptyDir would be the minimum value between
+ the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ The default is nil which means that the limit is undefined.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being
+ referenced
+ type: string
+ name:
+ description: Name is the name of resource being
+ referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being
+ referenced
+ type: string
+ name:
+ description: Name is the name of resource being
+ referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes
+ to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource that is
+ attached to a kubelet's host machine and then exposed to the
+ pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target worldwide
+ names (WWNs)'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+ properties:
+ driver:
+ description: driver is the name of the driver to use for
+ this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds extra
+ command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - driver
+ type: object
+ flocker:
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
+ properties:
+ datasetName:
+ description: |-
+ datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker
+ should be considered as deprecated
+ type: string
+ datasetUUID:
+ description: datasetUUID is the UUID of the dataset. This
+ is unique identifier of a Flocker dataset
+ type: string
+ type: object
+ gcePersistentDisk:
+ description: |-
+ gcePersistentDisk represents a GCE Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ format: int32
+ type: integer
+ pdName:
+ description: |-
+ pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ description: |-
+ gitRepo represents a git repository at a particular revision.
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
+ EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ into the Pod's container.
+ properties:
+ directory:
+ description: |-
+ directory is the target directory name.
+ Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ git repository. Otherwise, if specified, the volume will contain the git repository in
+ the subdirectory with the given name.
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description: revision is the commit hash for the specified
+ revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ image:
+ description: |-
+ image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+ - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails.
+ - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+ The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ The volume will be mounted read-only (ro) and non-executable files (noexec).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
+ The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+ Policy for pulling OCI objects. Possible values are:
+ Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether support iSCSI
+ Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether support iSCSI
+ Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ default: default
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI target
+ and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ description: |-
+ name of the volume.
+ Must be a DNS_LABEL and unique within the pod.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ nfs:
+ description: |-
+ nfs represents an NFS mount on the host that shares a pod's lifetime
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ properties:
+ path:
+ description: |-
+ path that is exported by the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the NFS export to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: boolean
+ server:
+ description: |-
+ server is the hostname or IP address of the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ description: |-
+ persistentVolumeClaimVolumeSource represents a reference to a
+ PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ properties:
+ claimName:
+ description: |-
+ claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ type: string
+ readOnly:
+ description: |-
+ readOnly Will force the ReadOnly setting in VolumeMounts.
+ Default false.
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ description: |-
+ photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ pdID:
+ description: pdID is the ID that identifies Photon Controller
+ persistent disk
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ description: |-
+ portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ is on.
+ properties:
+ fsType:
+ description: |-
+ fSType represents the filesystem type to mount
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ volumeID:
+ description: volumeID uniquely identifies a Portworx volume
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ description: projected items for all in one resources secrets,
+ configmaps, and downward API
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: |-
+ sources is the list of volume projections. Each entry in this list
+ handles one source.
+ items:
+ description: |-
+ Projection that may be projected along with other supported volume types.
+ Exactly one of these fields must be set.
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+
+ Kubelet performs aggressive normalization of the PEM contents written
+ into the pod filesystem. Esoteric PEM features such as inter-block
+ comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet
+ may change the order over time.
+ properties:
+ labelSelector:
+ description: |-
+ Select all ClusterTrustBundles that match this label selector. Only has
+ effect if signerName is set. Mutually-exclusive with name. If unset,
+ interpreted as "match nothing". If set but empty, interpreted as "match
+ everything".
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of
+ label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ name:
+ description: |-
+ Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ with signerName and labelSelector.
+ type: string
+ optional:
+ description: |-
+ If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ aren't available. If using name, then the named ClusterTrustBundle is
+ allowed not to exist. If using signerName, then the combination of
+ signerName and labelSelector is allowed to match zero
+ ClusterTrustBundles.
+ type: boolean
+ path:
+ description: Relative path from the volume root
+ to write the bundle.
+ type: string
+ signerName:
+ description: |-
+ Select all ClusterTrustBundles that match this signer name.
+ Mutually-exclusive with name. The contents of all selected
+ ClusterTrustBundles will be unified and deduplicated.
+ type: string
+ required:
+ - path
+ type: object
+ configMap:
+ description: configMap information about the configMap
+ data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ downwardAPI:
+ description: downwardAPI information about the downwardAPI
+ data to project
+ properties:
+ items:
+ description: Items is a list of DownwardAPIVolume
+ file
+ items:
+ description: DownwardAPIVolumeFile represents
+ information to create the file containing
+ the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field
+ of the pod: only annotations, labels,
+ name, namespace and uid are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of,
+ defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the relative
+ path name of the file to be created. Must
+ not be absolute or contain the ''..''
+ path. Must be utf-8 encoded. The first
+ item of the relative path must not start
+ with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults
+ to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to
+ select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ secret:
+ description: secret information about the secret data
+ to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional field specify whether the
+ Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information about
+ the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours.Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serivceaccount user
+ type: string
+ volume:
+ description: volume is a string that references an already
+ created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ default: /etc/ceph/keyring
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ pool:
+ default: rbd
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is name of the authentication secret for RBDUser. If provided
+ overrides keyring.
+ Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ default: admin
+ description: |-
+ user is the rados user name.
+ Default is admin.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ description: |-
+ scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
+ properties:
+ fsType:
+ default: xfs
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs".
+ Default is "xfs".
+ type: string
+ gateway:
+ description: gateway is the host address of the ScaleIO
+ API Gateway.
+ type: string
+ protectionDomain:
+ description: protectionDomain is the name of the ScaleIO
+ Protection Domain for the configured storage.
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef references to the secret for ScaleIO user and other
+ sensitive information. If this is not provided, Login operation will fail.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ sslEnabled:
+ description: sslEnabled Flag enable/disable SSL communication
+ with Gateway, default false
+ type: boolean
+ storageMode:
+ default: ThinProvisioned
+ description: |-
+ storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ Default is ThinProvisioned.
+ type: string
+ storagePool:
+ description: storagePool is the ScaleIO Storage Pool associated
+ with the protection domain.
+ type: string
+ system:
+ description: system is the name of the storage system as
+ configured in ScaleIO.
+ type: string
+ volumeName:
+ description: |-
+ volumeName is the name of a volume already created in the ScaleIO system
+ that is associated with this volume source.
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ description: |-
+ secret represents a secret that should populate this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values
+ for mode bits. Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items If unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ optional:
+ description: optional field specify whether the Secret or
+ its keys must be defined
+ type: boolean
+ secretName:
+ description: |-
+ secretName is the name of the secret in the pod's namespace to use.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ type: string
+ type: object
+ storageos:
+ description: |-
+ storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef specifies the secret to use for obtaining the StorageOS API
+ credentials. If not specified, default values will be attempted.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeName:
+ description: |-
+ volumeName is the human-readable name of the StorageOS volume. Volume
+ names are only unique within a namespace.
+ type: string
+ volumeNamespace:
+ description: |-
+ volumeNamespace specifies the scope of the volume within StorageOS. If no
+ namespace is specified then the Pod's namespace will be used. This allows the
+ Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ Set VolumeName to any name to override the default behaviour.
+ Set to "default" if you are not using namespaces within StorageOS.
+ Namespaces that do not pre-exist within StorageOS will be created.
+ type: string
+ type: object
+ vsphereVolume:
+ description: |-
+ vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ are redirected to the csi.vsphere.vmware.com CSI driver.
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ storagePolicyID:
+ description: storagePolicyID is the storage Policy Based
+ Management (SPBM) profile ID associated with the StoragePolicyName.
+ type: string
+ storagePolicyName:
+ description: storagePolicyName is the storage Policy Based
+ Management (SPBM) profile name.
+ type: string
+ volumePath:
+ description: volumePath is the path that identifies vSphere
+ volume vmdk
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ required:
+ - objectStorageRef
+ - queueRef
+ type: object
+ x-kubernetes-validations:
+ - message: queueRef is immutable once created
+ rule: self.queueRef == oldSelf.queueRef
+ - message: objectStorageRef is immutable once created
+ rule: self.objectStorageRef == oldSelf.objectStorageRef
+ status:
+ description: IngestorClusterStatus defines the observed state of Ingestor
+ Cluster
+ properties:
+ appContext:
+ description: App Framework context
+ properties:
+ appRepo:
+ description: List of App package (*.spl, *.tgz) locations on remote
+ volume
+ properties:
+ appInstallPeriodSeconds:
+ default: 90
+ description: |-
+ App installation period within a reconcile. Apps will be installed during this period before the next reconcile is attempted.
+ Note: Do not change this setting unless instructed to do so by Splunk Support
+ format: int64
+ minimum: 30
+ type: integer
+ appSources:
+ description: List of App sources on remote storage
+ items:
+ description: AppSourceSpec defines list of App package (*.spl,
+ *.tgz) locations on remote volumes
+ properties:
+ location:
+ description: Location relative to the volume path
+ type: string
+ name:
+ description: Logical name for the set of apps placed
+ in this location. Logical name must be unique to the
+ appRepo
+ type: string
+ premiumAppsProps:
+ description: Properties for premium apps, fill in when
+ scope premiumApps is chosen
+ properties:
+ esDefaults:
+ description: Enterprise Security App defaults
+ properties:
+ sslEnablement:
+ description: "Sets the sslEnablement value for
+ ES app installation\n strict: Ensure that
+ SSL is enabled\n in the web.conf
+ configuration file to use\n this
+ mode. Otherwise, the installer exits\n\t
+ \ \t with an error. This is the DEFAULT
+ mode used\n by the operator if
+ left empty.\n auto: Enables SSL in the
+ etc/system/local/web.conf\n configuration
+ file.\n ignore: Ignores whether SSL is
+ enabled or disabled."
+ type: string
+ type: object
+ type:
+ description: 'Type: enterpriseSecurity for now,
+ can accommodate itsi etc.. later'
+ type: string
+ type: object
+ scope:
+ description: 'Scope of the App deployment: cluster,
+ clusterWithPreConfig, local, premiumApps. Scope determines
+ whether the App(s) is/are installed locally, cluster-wide
+ or its a premium app'
+ type: string
+ volumeName:
+ description: Remote Storage Volume name
+ type: string
+ type: object
+ type: array
+ appsRepoPollIntervalSeconds:
+ description: |-
+ Interval in seconds to check the Remote Storage for App changes.
+ The default value for this config is 1 hour(3600 sec),
+ minimum value is 1 minute(60sec) and maximum value is 1 day(86400 sec).
+ We assign the value based on following conditions -
+ 1. If no value or 0 is specified then it means periodic polling is disabled.
+ 2. If anything less than min is specified then we set it to 1 min.
+ 3. If anything more than the max value is specified then we set it to 1 day.
+ format: int64
+ type: integer
+ defaults:
+ description: Defines the default configuration settings for
+ App sources
+ properties:
+ premiumAppsProps:
+ description: Properties for premium apps, fill in when
+ scope premiumApps is chosen
+ properties:
+ esDefaults:
+ description: Enterprise Security App defaults
+ properties:
+ sslEnablement:
+ description: "Sets the sslEnablement value for
+ ES app installation\n strict: Ensure that
+ SSL is enabled\n in the web.conf
+ configuration file to use\n this
+ mode. Otherwise, the installer exits\n\t \t
+ \ with an error. This is the DEFAULT mode used\n
+ \ by the operator if left empty.\n
+ \ auto: Enables SSL in the etc/system/local/web.conf\n
+ \ configuration file.\n ignore: Ignores
+ whether SSL is enabled or disabled."
+ type: string
+ type: object
+ type:
+ description: 'Type: enterpriseSecurity for now, can
+ accommodate itsi etc.. later'
+ type: string
+ type: object
+ scope:
+ description: 'Scope of the App deployment: cluster, clusterWithPreConfig,
+ local, premiumApps. Scope determines whether the App(s)
+ is/are installed locally, cluster-wide or its a premium
+ app'
+ type: string
+ volumeName:
+ description: Remote Storage Volume name
+ type: string
+ type: object
+ installMaxRetries:
+ default: 2
+ description: Maximum number of retries to install Apps
+ format: int32
+ minimum: 0
+ type: integer
+ maxConcurrentAppDownloads:
+ description: Maximum number of apps that can be downloaded
+ at same time
+ format: int64
+ type: integer
+ volumes:
+ description: List of remote storage volumes
+ items:
+ description: VolumeSpec defines remote volume config
+ properties:
+ endpoint:
+ description: Remote volume URI
+ type: string
+ name:
+ description: Remote volume name
+ type: string
+ path:
+ description: Remote volume path
+ type: string
+ provider:
+ description: 'App Package Remote Store provider. Supported
+ values: aws, minio, azure, gcp.'
+ type: string
+ region:
+ description: Region of the remote storage volume where
+ apps reside. Used for aws, if provided. Not used for
+ minio and azure.
+ type: string
+ secretRef:
+ description: Secret object name
+ type: string
+ storageType:
+ description: 'Remote Storage type. Supported values:
+ s3, blob, gcs. s3 works with aws or minio providers,
+ whereas blob works with azure provider, gcs works
+ for gcp.'
+ type: string
+ type: object
+ type: array
+ type: object
+ appSrcDeployStatus:
+ additionalProperties:
+ description: AppSrcDeployInfo represents deployment info for
+ list of Apps
+ properties:
+ appDeploymentInfo:
+ items:
+ description: AppDeploymentInfo represents a single App
+ deployment information
+ properties:
+ Size:
+ format: int64
+ type: integer
+ appName:
+ description: |-
+ AppName is the name of app archive retrieved from the
+ remote bucket e.g app1.tgz or app2.spl
+ type: string
+ appPackageTopFolder:
+ description: |-
+ AppPackageTopFolder is the name of top folder when we untar the
+ app archive, which is also assumed to be same as the name of the
+ app after it is installed.
+ type: string
+ auxPhaseInfo:
+ description: |-
+ Used to track the copy and install status for each replica member.
+ Each Pod's phase info is mapped to its ordinal value.
+ Ignored, once the DeployStatus is marked as Complete
+ items:
+ description: PhaseInfo defines the status to track
+ the App framework installation phase
+ properties:
+ failCount:
+ description: represents number of failures
+ format: int32
+ type: integer
+ phase:
+ description: Phase type
+ type: string
+ status:
+ description: Status of the phase
+ format: int32
+ type: integer
+ type: object
+ type: array
+ deployStatus:
+ description: AppDeploymentStatus represents the status
+ of an App on the Pod
+ type: integer
+ isUpdate:
+ type: boolean
+ lastModifiedTime:
+ type: string
+ objectHash:
+ type: string
+ phaseInfo:
+ description: App phase info to track download, copy
+ and install
+ properties:
+ failCount:
+ description: represents number of failures
+ format: int32
+ type: integer
+ phase:
+ description: Phase type
+ type: string
+ status:
+ description: Status of the phase
+ format: int32
+ type: integer
+ type: object
+ repoState:
+ description: AppRepoState represent the App state
+ on remote store
+ type: integer
+ type: object
+ type: array
+ type: object
+ description: Represents the Apps deployment status
+ type: object
+ appsRepoStatusPollIntervalSeconds:
+ description: |-
+ Interval in seconds to check the Remote Storage for App changes
+ This is introduced here so that we dont do spec validation in every reconcile just
+ because the spec and status are different.
+ format: int64
+ type: integer
+ appsStatusMaxConcurrentAppDownloads:
+ description: Represents the Status field for maximum number of
+ apps that can be downloaded at same time
+ format: int64
+ type: integer
+ bundlePushStatus:
+ description: Internal to the App framework. Used in case of CM(IDXC)
+ and deployer(SHC)
+ properties:
+ bundlePushStage:
+ description: Represents the current stage. Internal to the
+ App framework
+ type: integer
+ retryCount:
+ description: defines the number of retries completed so far
+ format: int32
+ type: integer
+ type: object
+ isDeploymentInProgress:
+ description: IsDeploymentInProgress indicates if the Apps deployment
+ is in progress
+ type: boolean
+ lastAppInfoCheckTime:
+ description: This is set to the time when we get the list of apps
+ from remote storage.
+ format: int64
+ type: integer
+ version:
+ description: App Framework version info for future use
+ type: integer
+ type: object
+ credentialSecretVersion:
+ description: Credential secret version to track changes to the secret
+ and trigger rolling restart of indexer cluster peers when the secret
+ is updated
+ type: string
+ message:
+ description: Auxiliary message describing CR status
+ type: string
+ phase:
+ description: Phase of the ingestor pods
+ enum:
+ - Pending
+ - Ready
+ - Updating
+ - ScalingUp
+ - ScalingDown
+ - Terminating
+ - Error
+ type: string
+ readyReplicas:
+ description: Number of ready ingestor pods
+ format: int32
+ type: integer
+ replicas:
+ description: Number of desired ingestor pods
+ format: int32
+ type: integer
+ resourceRevMap:
+ additionalProperties:
+ type: string
+ description: Resource revision tracker
+ type: object
+ selector:
+ description: Selector for pods used by HorizontalPodAutoscaler
+ type: string
+ serviceAccount:
+ description: Service account to track changes to the service account
+ and trigger rolling restart of indexer cluster peers when the service
+ account is updated
+ type: string
+ telAppInstalled:
+ description: Telemetry App installation flag
+ type: boolean
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ scale:
+ labelSelectorPath: .status.selector
+ specReplicasPath: .spec.replicas
+ statusReplicasPath: .status.replicas
+ status: {}
diff --git a/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml b/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml
index ed6181c82..e4c5d9bb1 100644
--- a/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml
+++ b/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml
@@ -25,7 +25,7 @@ spec:
jsonPath: .metadata.creationTimestamp
name: Age
type: date
- - description: Auxillary message describing CR status
+ - description: Auxiliary message describing CR status
jsonPath: .status.message
name: Message
type: string
@@ -344,7 +344,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -359,7 +358,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -525,7 +523,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -540,7 +537,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -703,7 +699,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -718,7 +713,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -884,7 +878,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -899,7 +892,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1027,7 +1019,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -1076,7 +1068,7 @@ spec:
type: string
type: object
type:
- description: 'Type: enterpriseSecurity for now, can accomodate
+ description: 'Type: enterpriseSecurity for now, can accommodate
itsi etc.. later'
type: string
type: object
@@ -1380,10 +1372,11 @@ spec:
environment variables)
type: string
imagePullPolicy:
- description: 'Sets pull policy for all images (either “Always” or
- the default: “IfNotPresent”)'
+ description: 'Sets pull policy for all images ("Always", "Never",
+ or the default: "IfNotPresent")'
enum:
- Always
+ - Never
- IfNotPresent
type: string
imagePullSecrets:
@@ -2016,13 +2009,12 @@ spec:
type: object
trafficDistribution:
description: |-
- TrafficDistribution offers a way to express preferences for how traffic is
- distributed to Service endpoints. Implementations can use this field as a
- hint, but are not required to guarantee strict adherence. If the field is
- not set, the implementation will apply its default routing strategy. If set
- to "PreferClose", implementations should prioritize endpoints that are
- topologically close (e.g., same zone).
- This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ TrafficDistribution offers a way to express preferences for how traffic
+ is distributed to Service endpoints. Implementations can use this field
+ as a hint, but are not required to guarantee strict adherence. If the
+ field is not set, the implementation will apply its default routing
+ strategy. If set to "PreferClose", implementations should prioritize
+ endpoints that are in the same zone.
type: string
type:
description: |-
@@ -2150,6 +2142,8 @@ spec:
Ports is a list of records of service ports
If used, every port defined in the service should have an entry in it
items:
+ description: PortStatus represents the error condition
+ of a service port
properties:
error:
description: |-
@@ -2373,7 +2367,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -2384,7 +2377,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -2454,6 +2446,8 @@ spec:
description: |-
awsElasticBlockStore represents an AWS Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
properties:
fsType:
@@ -2485,8 +2479,10 @@ spec:
- volumeID
type: object
azureDisk:
- description: azureDisk represents an Azure Data Disk mount on
- the host and bind mount to the pod.
+ description: |-
+ azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ are redirected to the disk.csi.azure.com CSI driver.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode: None,
@@ -2524,8 +2520,10 @@ spec:
- diskURI
type: object
azureFile:
- description: azureFile represents an Azure File Service mount
- on the host and bind mount to the pod.
+ description: |-
+ azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ are redirected to the file.csi.azure.com CSI driver.
properties:
readOnly:
description: |-
@@ -2544,8 +2542,9 @@ spec:
- shareName
type: object
cephfs:
- description: cephFS represents a Ceph FS mount on the host that
- shares a pod's lifetime
+ description: |-
+ cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
properties:
monitors:
description: |-
@@ -2597,6 +2596,8 @@ spec:
cinder:
description: |-
cinder represents a cinder volume attached and mounted on kubelets host machine.
+ Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ are redirected to the cinder.csi.openstack.org CSI driver.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
properties:
fsType:
@@ -2706,8 +2707,7 @@ spec:
x-kubernetes-map-type: atomic
csi:
description: csi (Container Storage Interface) represents ephemeral
- storage that is handled by certain external CSI drivers (Beta
- feature).
+ storage that is handled by certain external CSI drivers.
properties:
driver:
description: |-
@@ -3173,6 +3173,7 @@ spec:
description: |-
flexVolume represents a generic volume resource that is
provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
properties:
driver:
description: driver is the name of the driver to use for
@@ -3218,9 +3219,9 @@ spec:
- driver
type: object
flocker:
- description: flocker represents a Flocker volume attached to
- a kubelet's host machine. This depends on the Flocker control
- service being running
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
properties:
datasetName:
description: |-
@@ -3236,6 +3237,8 @@ spec:
description: |-
gcePersistentDisk represents a GCE Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
properties:
fsType:
@@ -3271,7 +3274,7 @@ spec:
gitRepo:
description: |-
gitRepo represents a git repository at a particular revision.
- DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
into the Pod's container.
properties:
@@ -3295,6 +3298,7 @@ spec:
glusterfs:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
@@ -3354,7 +3358,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
- Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@@ -3504,8 +3508,9 @@ spec:
- claimName
type: object
photonPersistentDisk:
- description: photonPersistentDisk represents a PhotonController
- persistent disk attached and mounted on kubelets host machine
+ description: |-
+ photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
properties:
fsType:
description: |-
@@ -3521,8 +3526,11 @@ spec:
- pdID
type: object
portworxVolume:
- description: portworxVolume represents a portworx volume attached
- and mounted on kubelets host machine
+ description: |-
+ portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ is on.
properties:
fsType:
description: |-
@@ -3887,8 +3895,9 @@ spec:
x-kubernetes-list-type: atomic
type: object
quobyte:
- description: quobyte represents a Quobyte mount on the host
- that shares a pod's lifetime
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
properties:
group:
description: |-
@@ -3927,6 +3936,7 @@ spec:
rbd:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
@@ -3999,8 +4009,9 @@ spec:
- monitors
type: object
scaleIO:
- description: scaleIO represents a ScaleIO persistent volume
- attached and mounted on Kubernetes nodes.
+ description: |-
+ scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
properties:
fsType:
default: xfs
@@ -4132,8 +4143,9 @@ spec:
type: string
type: object
storageos:
- description: storageOS represents a StorageOS volume attached
- and mounted on Kubernetes nodes.
+ description: |-
+ storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
properties:
fsType:
description: |-
@@ -4178,8 +4190,10 @@ spec:
type: string
type: object
vsphereVolume:
- description: vsphereVolume represents a vSphere volume attached
- and mounted on kubelets host machine
+ description: |-
+ vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ are redirected to the csi.vsphere.vmware.com CSI driver.
properties:
fsType:
description: |-
@@ -4263,7 +4277,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now,
- can accomodate itsi etc.. later'
+ can accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -4314,7 +4328,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -4493,7 +4507,7 @@ spec:
type: integer
type: object
message:
- description: Auxillary message describing CR status
+ description: Auxiliary message describing CR status
type: string
phase:
description: current phase of the license manager
diff --git a/config/crd/bases/enterprise.splunk.com_licensemasters.yaml b/config/crd/bases/enterprise.splunk.com_licensemasters.yaml
index 85702267d..75fbb64c1 100644
--- a/config/crd/bases/enterprise.splunk.com_licensemasters.yaml
+++ b/config/crd/bases/enterprise.splunk.com_licensemasters.yaml
@@ -339,7 +339,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -354,7 +353,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -520,7 +518,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -535,7 +532,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -698,7 +694,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -713,7 +708,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -879,7 +873,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -894,7 +887,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1022,7 +1014,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -1071,7 +1063,7 @@ spec:
type: string
type: object
type:
- description: 'Type: enterpriseSecurity for now, can accomodate
+ description: 'Type: enterpriseSecurity for now, can accommodate
itsi etc.. later'
type: string
type: object
@@ -1375,10 +1367,11 @@ spec:
environment variables)
type: string
imagePullPolicy:
- description: 'Sets pull policy for all images (either “Always” or
- the default: “IfNotPresent”)'
+ description: 'Sets pull policy for all images ("Always", "Never",
+ or the default: "IfNotPresent")'
enum:
- Always
+ - Never
- IfNotPresent
type: string
imagePullSecrets:
@@ -2011,13 +2004,12 @@ spec:
type: object
trafficDistribution:
description: |-
- TrafficDistribution offers a way to express preferences for how traffic is
- distributed to Service endpoints. Implementations can use this field as a
- hint, but are not required to guarantee strict adherence. If the field is
- not set, the implementation will apply its default routing strategy. If set
- to "PreferClose", implementations should prioritize endpoints that are
- topologically close (e.g., same zone).
- This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ TrafficDistribution offers a way to express preferences for how traffic
+ is distributed to Service endpoints. Implementations can use this field
+ as a hint, but are not required to guarantee strict adherence. If the
+ field is not set, the implementation will apply its default routing
+ strategy. If set to "PreferClose", implementations should prioritize
+ endpoints that are in the same zone.
type: string
type:
description: |-
@@ -2145,6 +2137,8 @@ spec:
Ports is a list of records of service ports
If used, every port defined in the service should have an entry in it
items:
+ description: PortStatus represents the error condition
+ of a service port
properties:
error:
description: |-
@@ -2368,7 +2362,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -2379,7 +2372,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -2449,6 +2441,8 @@ spec:
description: |-
awsElasticBlockStore represents an AWS Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
properties:
fsType:
@@ -2480,8 +2474,10 @@ spec:
- volumeID
type: object
azureDisk:
- description: azureDisk represents an Azure Data Disk mount on
- the host and bind mount to the pod.
+ description: |-
+ azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ are redirected to the disk.csi.azure.com CSI driver.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode: None,
@@ -2519,8 +2515,10 @@ spec:
- diskURI
type: object
azureFile:
- description: azureFile represents an Azure File Service mount
- on the host and bind mount to the pod.
+ description: |-
+ azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ are redirected to the file.csi.azure.com CSI driver.
properties:
readOnly:
description: |-
@@ -2539,8 +2537,9 @@ spec:
- shareName
type: object
cephfs:
- description: cephFS represents a Ceph FS mount on the host that
- shares a pod's lifetime
+ description: |-
+ cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
properties:
monitors:
description: |-
@@ -2592,6 +2591,8 @@ spec:
cinder:
description: |-
cinder represents a cinder volume attached and mounted on kubelets host machine.
+ Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ are redirected to the cinder.csi.openstack.org CSI driver.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
properties:
fsType:
@@ -2701,8 +2702,7 @@ spec:
x-kubernetes-map-type: atomic
csi:
description: csi (Container Storage Interface) represents ephemeral
- storage that is handled by certain external CSI drivers (Beta
- feature).
+ storage that is handled by certain external CSI drivers.
properties:
driver:
description: |-
@@ -3168,6 +3168,7 @@ spec:
description: |-
flexVolume represents a generic volume resource that is
provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
properties:
driver:
description: driver is the name of the driver to use for
@@ -3213,9 +3214,9 @@ spec:
- driver
type: object
flocker:
- description: flocker represents a Flocker volume attached to
- a kubelet's host machine. This depends on the Flocker control
- service being running
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
properties:
datasetName:
description: |-
@@ -3231,6 +3232,8 @@ spec:
description: |-
gcePersistentDisk represents a GCE Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
properties:
fsType:
@@ -3266,7 +3269,7 @@ spec:
gitRepo:
description: |-
gitRepo represents a git repository at a particular revision.
- DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
into the Pod's container.
properties:
@@ -3290,6 +3293,7 @@ spec:
glusterfs:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
@@ -3349,7 +3353,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
- Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@@ -3499,8 +3503,9 @@ spec:
- claimName
type: object
photonPersistentDisk:
- description: photonPersistentDisk represents a PhotonController
- persistent disk attached and mounted on kubelets host machine
+ description: |-
+ photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
properties:
fsType:
description: |-
@@ -3516,8 +3521,11 @@ spec:
- pdID
type: object
portworxVolume:
- description: portworxVolume represents a portworx volume attached
- and mounted on kubelets host machine
+ description: |-
+ portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ is on.
properties:
fsType:
description: |-
@@ -3882,8 +3890,9 @@ spec:
x-kubernetes-list-type: atomic
type: object
quobyte:
- description: quobyte represents a Quobyte mount on the host
- that shares a pod's lifetime
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
properties:
group:
description: |-
@@ -3922,6 +3931,7 @@ spec:
rbd:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
@@ -3994,8 +4004,9 @@ spec:
- monitors
type: object
scaleIO:
- description: scaleIO represents a ScaleIO persistent volume
- attached and mounted on Kubernetes nodes.
+ description: |-
+ scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
properties:
fsType:
default: xfs
@@ -4127,8 +4138,9 @@ spec:
type: string
type: object
storageos:
- description: storageOS represents a StorageOS volume attached
- and mounted on Kubernetes nodes.
+ description: |-
+ storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
properties:
fsType:
description: |-
@@ -4173,8 +4185,10 @@ spec:
type: string
type: object
vsphereVolume:
- description: vsphereVolume represents a vSphere volume attached
- and mounted on kubelets host machine
+ description: |-
+ vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ are redirected to the csi.vsphere.vmware.com CSI driver.
properties:
fsType:
description: |-
@@ -4258,7 +4272,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now,
- can accomodate itsi etc.. later'
+ can accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -4309,7 +4323,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
diff --git a/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml b/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml
index 2700bb371..dc8feed93 100644
--- a/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml
+++ b/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml
@@ -346,7 +346,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -361,7 +360,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -527,7 +525,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -542,7 +539,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -705,7 +701,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -720,7 +715,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -886,7 +880,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -901,7 +894,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1029,7 +1021,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -1078,7 +1070,7 @@ spec:
type: string
type: object
type:
- description: 'Type: enterpriseSecurity for now, can accomodate
+ description: 'Type: enterpriseSecurity for now, can accommodate
itsi etc.. later'
type: string
type: object
@@ -1382,10 +1374,11 @@ spec:
environment variables)
type: string
imagePullPolicy:
- description: 'Sets pull policy for all images (either “Always” or
- the default: “IfNotPresent”)'
+ description: 'Sets pull policy for all images ("Always", "Never",
+ or the default: "IfNotPresent")'
enum:
- Always
+ - Never
- IfNotPresent
type: string
imagePullSecrets:
@@ -2018,13 +2011,12 @@ spec:
type: object
trafficDistribution:
description: |-
- TrafficDistribution offers a way to express preferences for how traffic is
- distributed to Service endpoints. Implementations can use this field as a
- hint, but are not required to guarantee strict adherence. If the field is
- not set, the implementation will apply its default routing strategy. If set
- to "PreferClose", implementations should prioritize endpoints that are
- topologically close (e.g., same zone).
- This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ TrafficDistribution offers a way to express preferences for how traffic
+ is distributed to Service endpoints. Implementations can use this field
+ as a hint, but are not required to guarantee strict adherence. If the
+ field is not set, the implementation will apply its default routing
+ strategy. If set to "PreferClose", implementations should prioritize
+ endpoints that are in the same zone.
type: string
type:
description: |-
@@ -2152,6 +2144,8 @@ spec:
Ports is a list of records of service ports
If used, every port defined in the service should have an entry in it
items:
+ description: PortStatus represents the error condition
+ of a service port
properties:
error:
description: |-
@@ -2375,7 +2369,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -2386,7 +2379,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -2456,6 +2448,8 @@ spec:
description: |-
awsElasticBlockStore represents an AWS Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
properties:
fsType:
@@ -2487,8 +2481,10 @@ spec:
- volumeID
type: object
azureDisk:
- description: azureDisk represents an Azure Data Disk mount on
- the host and bind mount to the pod.
+ description: |-
+ azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ are redirected to the disk.csi.azure.com CSI driver.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode: None,
@@ -2526,8 +2522,10 @@ spec:
- diskURI
type: object
azureFile:
- description: azureFile represents an Azure File Service mount
- on the host and bind mount to the pod.
+ description: |-
+ azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ are redirected to the file.csi.azure.com CSI driver.
properties:
readOnly:
description: |-
@@ -2546,8 +2544,9 @@ spec:
- shareName
type: object
cephfs:
- description: cephFS represents a Ceph FS mount on the host that
- shares a pod's lifetime
+ description: |-
+ cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
properties:
monitors:
description: |-
@@ -2599,6 +2598,8 @@ spec:
cinder:
description: |-
cinder represents a cinder volume attached and mounted on kubelets host machine.
+ Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ are redirected to the cinder.csi.openstack.org CSI driver.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
properties:
fsType:
@@ -2708,8 +2709,7 @@ spec:
x-kubernetes-map-type: atomic
csi:
description: csi (Container Storage Interface) represents ephemeral
- storage that is handled by certain external CSI drivers (Beta
- feature).
+ storage that is handled by certain external CSI drivers.
properties:
driver:
description: |-
@@ -3175,6 +3175,7 @@ spec:
description: |-
flexVolume represents a generic volume resource that is
provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
properties:
driver:
description: driver is the name of the driver to use for
@@ -3220,9 +3221,9 @@ spec:
- driver
type: object
flocker:
- description: flocker represents a Flocker volume attached to
- a kubelet's host machine. This depends on the Flocker control
- service being running
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
properties:
datasetName:
description: |-
@@ -3238,6 +3239,8 @@ spec:
description: |-
gcePersistentDisk represents a GCE Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
properties:
fsType:
@@ -3273,7 +3276,7 @@ spec:
gitRepo:
description: |-
gitRepo represents a git repository at a particular revision.
- DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
into the Pod's container.
properties:
@@ -3297,6 +3300,7 @@ spec:
glusterfs:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
@@ -3356,7 +3360,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
- Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@@ -3506,8 +3510,9 @@ spec:
- claimName
type: object
photonPersistentDisk:
- description: photonPersistentDisk represents a PhotonController
- persistent disk attached and mounted on kubelets host machine
+ description: |-
+ photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
properties:
fsType:
description: |-
@@ -3523,8 +3528,11 @@ spec:
- pdID
type: object
portworxVolume:
- description: portworxVolume represents a portworx volume attached
- and mounted on kubelets host machine
+ description: |-
+ portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ is on.
properties:
fsType:
description: |-
@@ -3889,8 +3897,9 @@ spec:
x-kubernetes-list-type: atomic
type: object
quobyte:
- description: quobyte represents a Quobyte mount on the host
- that shares a pod's lifetime
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
properties:
group:
description: |-
@@ -3929,6 +3938,7 @@ spec:
rbd:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
@@ -4001,8 +4011,9 @@ spec:
- monitors
type: object
scaleIO:
- description: scaleIO represents a ScaleIO persistent volume
- attached and mounted on Kubernetes nodes.
+ description: |-
+ scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
properties:
fsType:
default: xfs
@@ -4134,8 +4145,9 @@ spec:
type: string
type: object
storageos:
- description: storageOS represents a StorageOS volume attached
- and mounted on Kubernetes nodes.
+ description: |-
+ storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
properties:
fsType:
description: |-
@@ -4180,8 +4192,10 @@ spec:
type: string
type: object
vsphereVolume:
- description: vsphereVolume represents a vSphere volume attached
- and mounted on kubelets host machine
+ description: |-
+ vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ are redirected to the csi.vsphere.vmware.com CSI driver.
properties:
fsType:
description: |-
@@ -4264,7 +4278,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now,
- can accomodate itsi etc.. later'
+ can accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -4315,7 +4329,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -4546,7 +4560,7 @@ spec:
jsonPath: .metadata.creationTimestamp
name: Age
type: date
- - description: Auxillary message describing CR status
+ - description: Auxiliary message describing CR status
jsonPath: .status.message
name: Message
type: string
@@ -4863,7 +4877,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -4878,7 +4891,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5044,7 +5056,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5059,7 +5070,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5222,7 +5232,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5237,7 +5246,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5403,7 +5411,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5418,7 +5425,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5546,7 +5552,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -5595,7 +5601,7 @@ spec:
type: string
type: object
type:
- description: 'Type: enterpriseSecurity for now, can accomodate
+ description: 'Type: enterpriseSecurity for now, can accommodate
itsi etc.. later'
type: string
type: object
@@ -5899,10 +5905,11 @@ spec:
environment variables)
type: string
imagePullPolicy:
- description: 'Sets pull policy for all images (either “Always” or
- the default: “IfNotPresent”)'
+ description: 'Sets pull policy for all images ("Always", "Never",
+ or the default: "IfNotPresent")'
enum:
- Always
+ - Never
- IfNotPresent
type: string
imagePullSecrets:
@@ -6535,13 +6542,12 @@ spec:
type: object
trafficDistribution:
description: |-
- TrafficDistribution offers a way to express preferences for how traffic is
- distributed to Service endpoints. Implementations can use this field as a
- hint, but are not required to guarantee strict adherence. If the field is
- not set, the implementation will apply its default routing strategy. If set
- to "PreferClose", implementations should prioritize endpoints that are
- topologically close (e.g., same zone).
- This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ TrafficDistribution offers a way to express preferences for how traffic
+ is distributed to Service endpoints. Implementations can use this field
+ as a hint, but are not required to guarantee strict adherence. If the
+ field is not set, the implementation will apply its default routing
+ strategy. If set to "PreferClose", implementations should prioritize
+ endpoints that are in the same zone.
type: string
type:
description: |-
@@ -6669,6 +6675,8 @@ spec:
Ports is a list of records of service ports
If used, every port defined in the service should have an entry in it
items:
+ description: PortStatus represents the error condition
+ of a service port
properties:
error:
description: |-
@@ -6892,7 +6900,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -6903,7 +6910,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -6973,6 +6979,8 @@ spec:
description: |-
awsElasticBlockStore represents an AWS Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
properties:
fsType:
@@ -7004,8 +7012,10 @@ spec:
- volumeID
type: object
azureDisk:
- description: azureDisk represents an Azure Data Disk mount on
- the host and bind mount to the pod.
+ description: |-
+ azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ are redirected to the disk.csi.azure.com CSI driver.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode: None,
@@ -7043,8 +7053,10 @@ spec:
- diskURI
type: object
azureFile:
- description: azureFile represents an Azure File Service mount
- on the host and bind mount to the pod.
+ description: |-
+ azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ are redirected to the file.csi.azure.com CSI driver.
properties:
readOnly:
description: |-
@@ -7063,8 +7075,9 @@ spec:
- shareName
type: object
cephfs:
- description: cephFS represents a Ceph FS mount on the host that
- shares a pod's lifetime
+ description: |-
+ cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
properties:
monitors:
description: |-
@@ -7116,6 +7129,8 @@ spec:
cinder:
description: |-
cinder represents a cinder volume attached and mounted on kubelets host machine.
+ Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ are redirected to the cinder.csi.openstack.org CSI driver.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
properties:
fsType:
@@ -7225,8 +7240,7 @@ spec:
x-kubernetes-map-type: atomic
csi:
description: csi (Container Storage Interface) represents ephemeral
- storage that is handled by certain external CSI drivers (Beta
- feature).
+ storage that is handled by certain external CSI drivers.
properties:
driver:
description: |-
@@ -7692,6 +7706,7 @@ spec:
description: |-
flexVolume represents a generic volume resource that is
provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
properties:
driver:
description: driver is the name of the driver to use for
@@ -7737,9 +7752,9 @@ spec:
- driver
type: object
flocker:
- description: flocker represents a Flocker volume attached to
- a kubelet's host machine. This depends on the Flocker control
- service being running
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
properties:
datasetName:
description: |-
@@ -7755,6 +7770,8 @@ spec:
description: |-
gcePersistentDisk represents a GCE Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
properties:
fsType:
@@ -7790,7 +7807,7 @@ spec:
gitRepo:
description: |-
gitRepo represents a git repository at a particular revision.
- DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
into the Pod's container.
properties:
@@ -7814,6 +7831,7 @@ spec:
glusterfs:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
@@ -7873,7 +7891,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
- Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@@ -8023,8 +8041,9 @@ spec:
- claimName
type: object
photonPersistentDisk:
- description: photonPersistentDisk represents a PhotonController
- persistent disk attached and mounted on kubelets host machine
+ description: |-
+ photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
properties:
fsType:
description: |-
@@ -8040,8 +8059,11 @@ spec:
- pdID
type: object
portworxVolume:
- description: portworxVolume represents a portworx volume attached
- and mounted on kubelets host machine
+ description: |-
+ portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ is on.
properties:
fsType:
description: |-
@@ -8406,8 +8428,9 @@ spec:
x-kubernetes-list-type: atomic
type: object
quobyte:
- description: quobyte represents a Quobyte mount on the host
- that shares a pod's lifetime
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
properties:
group:
description: |-
@@ -8446,6 +8469,7 @@ spec:
rbd:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
@@ -8518,8 +8542,9 @@ spec:
- monitors
type: object
scaleIO:
- description: scaleIO represents a ScaleIO persistent volume
- attached and mounted on Kubernetes nodes.
+ description: |-
+ scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
properties:
fsType:
default: xfs
@@ -8651,8 +8676,9 @@ spec:
type: string
type: object
storageos:
- description: storageOS represents a StorageOS volume attached
- and mounted on Kubernetes nodes.
+ description: |-
+ storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
properties:
fsType:
description: |-
@@ -8697,8 +8723,10 @@ spec:
type: string
type: object
vsphereVolume:
- description: vsphereVolume represents a vSphere volume attached
- and mounted on kubelets host machine
+ description: |-
+ vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ are redirected to the csi.vsphere.vmware.com CSI driver.
properties:
fsType:
description: |-
@@ -8781,7 +8809,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now,
- can accomodate itsi etc.. later'
+ can accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -8832,7 +8860,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -9022,7 +9050,7 @@ spec:
type: boolean
type: object
message:
- description: Auxillary message describing CR status
+ description: Auxiliary message describing CR status
type: string
phase:
description: current phase of the monitoring console
diff --git a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml
new file mode 100644
index 000000000..b2ae90755
--- /dev/null
+++ b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml
@@ -0,0 +1,114 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.18.0
+ name: objectstorages.enterprise.splunk.com
+spec:
+ group: enterprise.splunk.com
+ names:
+ kind: ObjectStorage
+ listKind: ObjectStorageList
+ plural: objectstorages
+ shortNames:
+ - os
+ singular: objectstorage
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Status of object storage
+ jsonPath: .status.phase
+ name: Phase
+ type: string
+ - description: Age of object storage resource
+ jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - description: Auxiliary message describing CR status
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v4
+ schema:
+ openAPIV3Schema:
+ description: ObjectStorage is the Schema for the objectstorages API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ObjectStorageSpec defines the desired state of ObjectStorage
+ properties:
+ provider:
+ description: Provider of object storage resources
+ enum:
+ - s3
+ type: string
+ s3:
+ description: s3 specific inputs
+ properties:
+ endpoint:
+ description: S3-compatible Service endpoint
+ pattern: ^https?://[^\s/$.?#].[^\s]*$
+ type: string
+ path:
+ description: S3 bucket path
+ pattern: ^(?:s3://)?[a-z0-9.-]{3,63}(?:/[^\s]+)?$
+ type: string
+ required:
+ - path
+ type: object
+ required:
+ - provider
+ - s3
+ type: object
+ x-kubernetes-validations:
+ - message: provider is immutable once created
+ rule: self.provider == oldSelf.provider
+ - message: s3 is immutable once created
+ rule: self.s3 == oldSelf.s3
+ - message: s3 must be provided when provider is s3
+ rule: self.provider != 's3' || has(self.s3)
+ status:
+ description: ObjectStorageStatus defines the observed state of ObjectStorage.
+ properties:
+ message:
+ description: Auxiliary message describing CR status
+ type: string
+ phase:
+ description: Phase of the object storage
+ enum:
+ - Pending
+ - Ready
+ - Updating
+ - ScalingUp
+ - ScalingDown
+ - Terminating
+ - Error
+ type: string
+ resourceRevMap:
+ additionalProperties:
+ type: string
+ description: Resource revision tracker
+ type: object
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/config/crd/bases/enterprise.splunk.com_queues.yaml b/config/crd/bases/enterprise.splunk.com_queues.yaml
new file mode 100644
index 000000000..0da0aa93c
--- /dev/null
+++ b/config/crd/bases/enterprise.splunk.com_queues.yaml
@@ -0,0 +1,163 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.18.0
+ name: queues.enterprise.splunk.com
+spec:
+ group: enterprise.splunk.com
+ names:
+ kind: Queue
+ listKind: QueueList
+ plural: queues
+ shortNames:
+ - queue
+ singular: queue
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Status of queue
+ jsonPath: .status.phase
+ name: Phase
+ type: string
+ - description: Age of queue resource
+ jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - description: Auxiliary message describing CR status
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v4
+ schema:
+ openAPIV3Schema:
+ description: Queue is the Schema for the queues API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: QueueSpec defines the desired state of Queue
+ properties:
+ provider:
+ description: Provider of queue resources
+ enum:
+ - sqs
+ - sqs_cp
+ type: string
+ sqs:
+ description: sqs specific inputs
+ properties:
+ authRegion:
+ description: Auth Region of the resources
+ pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$
+ type: string
+ dlq:
+ description: Name of the dead letter queue resource
+ minLength: 1
+ type: string
+ endpoint:
+ description: Amazon SQS Service endpoint
+ pattern: ^https?://[^\s/$.?#].[^\s]*$
+ type: string
+ name:
+ description: Name of the queue
+ minLength: 1
+ type: string
+ volumes:
+ description: List of remote storage volumes
+ items:
+ description: VolumeSpec defines remote volume config
+ properties:
+ endpoint:
+ description: Remote volume URI
+ type: string
+ name:
+ description: Remote volume name
+ type: string
+ path:
+ description: Remote volume path
+ type: string
+ provider:
+ description: 'App Package Remote Store provider. Supported
+ values: aws, minio, azure, gcp.'
+ type: string
+ region:
+ description: Region of the remote storage volume where apps
+ reside. Used for aws, if provided. Not used for minio
+ and azure.
+ type: string
+ secretRef:
+ description: Secret object name
+ type: string
+ storageType:
+ description: 'Remote Storage type. Supported values: s3,
+ blob, gcs. s3 works with aws or minio providers, whereas
+ blob works with azure provider, gcs works for gcp.'
+ type: string
+ type: object
+ type: array
+ required:
+ - dlq
+ - name
+ type: object
+ required:
+ - provider
+ - sqs
+ type: object
+ x-kubernetes-validations:
+ - message: provider is immutable once created
+ rule: self.provider == oldSelf.provider
+ - message: sqs.name is immutable once created
+ rule: self.sqs.name == oldSelf.sqs.name
+ - message: sqs.authRegion is immutable once created
+ rule: self.sqs.authRegion == oldSelf.sqs.authRegion
+ - message: sqs.dlq is immutable once created
+ rule: self.sqs.dlq == oldSelf.sqs.dlq
+ - message: sqs.endpoint is immutable once created
+ rule: self.sqs.endpoint == oldSelf.sqs.endpoint
+ - message: sqs must be provided when provider is sqs or sqs_cp
+ rule: (self.provider != 'sqs' && self.provider != 'sqs_cp') || has(self.sqs)
+ status:
+ description: QueueStatus defines the observed state of Queue
+ properties:
+ message:
+ description: Auxiliary message describing CR status
+ type: string
+ phase:
+ description: Phase of the queue
+ enum:
+ - Pending
+ - Ready
+ - Updating
+ - ScalingUp
+ - ScalingDown
+ - Terminating
+ - Error
+ type: string
+ resourceRevMap:
+ additionalProperties:
+ type: string
+ description: Resource revision tracker
+ type: object
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml b/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml
index 122f4d3bc..a905f1f36 100644
--- a/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml
+++ b/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml
@@ -352,7 +352,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -367,7 +366,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -533,7 +531,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -548,7 +545,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -711,7 +707,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -726,7 +721,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -892,7 +886,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -907,7 +900,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1035,7 +1027,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -1084,7 +1076,7 @@ spec:
type: string
type: object
type:
- description: 'Type: enterpriseSecurity for now, can accomodate
+ description: 'Type: enterpriseSecurity for now, can accommodate
itsi etc.. later'
type: string
type: object
@@ -1388,10 +1380,11 @@ spec:
environment variables)
type: string
imagePullPolicy:
- description: 'Sets pull policy for all images (either “Always” or
- the default: “IfNotPresent”)'
+ description: 'Sets pull policy for all images ("Always", "Never",
+ or the default: "IfNotPresent")'
enum:
- Always
+ - Never
- IfNotPresent
type: string
imagePullSecrets:
@@ -2029,13 +2022,12 @@ spec:
type: object
trafficDistribution:
description: |-
- TrafficDistribution offers a way to express preferences for how traffic is
- distributed to Service endpoints. Implementations can use this field as a
- hint, but are not required to guarantee strict adherence. If the field is
- not set, the implementation will apply its default routing strategy. If set
- to "PreferClose", implementations should prioritize endpoints that are
- topologically close (e.g., same zone).
- This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ TrafficDistribution offers a way to express preferences for how traffic
+ is distributed to Service endpoints. Implementations can use this field
+ as a hint, but are not required to guarantee strict adherence. If the
+ field is not set, the implementation will apply its default routing
+ strategy. If set to "PreferClose", implementations should prioritize
+ endpoints that are in the same zone.
type: string
type:
description: |-
@@ -2163,6 +2155,8 @@ spec:
Ports is a list of records of service ports
If used, every port defined in the service should have an entry in it
items:
+ description: PortStatus represents the error condition
+ of a service port
properties:
error:
description: |-
@@ -2386,7 +2380,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -2397,7 +2390,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -2467,6 +2459,8 @@ spec:
description: |-
awsElasticBlockStore represents an AWS Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
properties:
fsType:
@@ -2498,8 +2492,10 @@ spec:
- volumeID
type: object
azureDisk:
- description: azureDisk represents an Azure Data Disk mount on
- the host and bind mount to the pod.
+ description: |-
+ azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ are redirected to the disk.csi.azure.com CSI driver.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode: None,
@@ -2537,8 +2533,10 @@ spec:
- diskURI
type: object
azureFile:
- description: azureFile represents an Azure File Service mount
- on the host and bind mount to the pod.
+ description: |-
+ azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ are redirected to the file.csi.azure.com CSI driver.
properties:
readOnly:
description: |-
@@ -2557,8 +2555,9 @@ spec:
- shareName
type: object
cephfs:
- description: cephFS represents a Ceph FS mount on the host that
- shares a pod's lifetime
+ description: |-
+ cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
properties:
monitors:
description: |-
@@ -2610,6 +2609,8 @@ spec:
cinder:
description: |-
cinder represents a cinder volume attached and mounted on kubelets host machine.
+ Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ are redirected to the cinder.csi.openstack.org CSI driver.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
properties:
fsType:
@@ -2719,8 +2720,7 @@ spec:
x-kubernetes-map-type: atomic
csi:
description: csi (Container Storage Interface) represents ephemeral
- storage that is handled by certain external CSI drivers (Beta
- feature).
+ storage that is handled by certain external CSI drivers.
properties:
driver:
description: |-
@@ -3186,6 +3186,7 @@ spec:
description: |-
flexVolume represents a generic volume resource that is
provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
properties:
driver:
description: driver is the name of the driver to use for
@@ -3231,9 +3232,9 @@ spec:
- driver
type: object
flocker:
- description: flocker represents a Flocker volume attached to
- a kubelet's host machine. This depends on the Flocker control
- service being running
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
properties:
datasetName:
description: |-
@@ -3249,6 +3250,8 @@ spec:
description: |-
gcePersistentDisk represents a GCE Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
properties:
fsType:
@@ -3284,7 +3287,7 @@ spec:
gitRepo:
description: |-
gitRepo represents a git repository at a particular revision.
- DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
into the Pod's container.
properties:
@@ -3308,6 +3311,7 @@ spec:
glusterfs:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
@@ -3367,7 +3371,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
- Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@@ -3517,8 +3521,9 @@ spec:
- claimName
type: object
photonPersistentDisk:
- description: photonPersistentDisk represents a PhotonController
- persistent disk attached and mounted on kubelets host machine
+ description: |-
+ photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
properties:
fsType:
description: |-
@@ -3534,8 +3539,11 @@ spec:
- pdID
type: object
portworxVolume:
- description: portworxVolume represents a portworx volume attached
- and mounted on kubelets host machine
+ description: |-
+ portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ is on.
properties:
fsType:
description: |-
@@ -3900,8 +3908,9 @@ spec:
x-kubernetes-list-type: atomic
type: object
quobyte:
- description: quobyte represents a Quobyte mount on the host
- that shares a pod's lifetime
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
properties:
group:
description: |-
@@ -3940,6 +3949,7 @@ spec:
rbd:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
@@ -4012,8 +4022,9 @@ spec:
- monitors
type: object
scaleIO:
- description: scaleIO represents a ScaleIO persistent volume
- attached and mounted on Kubernetes nodes.
+ description: |-
+ scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
properties:
fsType:
default: xfs
@@ -4145,8 +4156,9 @@ spec:
type: string
type: object
storageos:
- description: storageOS represents a StorageOS volume attached
- and mounted on Kubernetes nodes.
+ description: |-
+ storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
properties:
fsType:
description: |-
@@ -4191,8 +4203,10 @@ spec:
type: string
type: object
vsphereVolume:
- description: vsphereVolume represents a vSphere volume attached
- and mounted on kubelets host machine
+ description: |-
+ vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ are redirected to the csi.vsphere.vmware.com CSI driver.
properties:
fsType:
description: |-
@@ -4287,7 +4301,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now,
- can accomodate itsi etc.. later'
+ can accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -4338,7 +4352,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -4637,7 +4651,7 @@ spec:
jsonPath: .metadata.creationTimestamp
name: Age
type: date
- - description: Auxillary message describing CR status
+ - description: Auxiliary message describing CR status
jsonPath: .status.message
name: Message
type: string
@@ -4956,7 +4970,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -4971,7 +4984,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5137,7 +5149,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5152,7 +5163,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5315,7 +5325,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5330,7 +5339,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5496,7 +5504,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5511,7 +5518,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5639,7 +5645,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -5688,7 +5694,7 @@ spec:
type: string
type: object
type:
- description: 'Type: enterpriseSecurity for now, can accomodate
+ description: 'Type: enterpriseSecurity for now, can accommodate
itsi etc.. later'
type: string
type: object
@@ -6249,10 +6255,11 @@ spec:
environment variables)
type: string
imagePullPolicy:
- description: 'Sets pull policy for all images (either “Always” or
- the default: “IfNotPresent”)'
+ description: 'Sets pull policy for all images ("Always", "Never",
+ or the default: "IfNotPresent")'
enum:
- Always
+ - Never
- IfNotPresent
type: string
imagePullSecrets:
@@ -6890,13 +6897,12 @@ spec:
type: object
trafficDistribution:
description: |-
- TrafficDistribution offers a way to express preferences for how traffic is
- distributed to Service endpoints. Implementations can use this field as a
- hint, but are not required to guarantee strict adherence. If the field is
- not set, the implementation will apply its default routing strategy. If set
- to "PreferClose", implementations should prioritize endpoints that are
- topologically close (e.g., same zone).
- This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ TrafficDistribution offers a way to express preferences for how traffic
+ is distributed to Service endpoints. Implementations can use this field
+ as a hint, but are not required to guarantee strict adherence. If the
+ field is not set, the implementation will apply its default routing
+ strategy. If set to "PreferClose", implementations should prioritize
+ endpoints that are in the same zone.
type: string
type:
description: |-
@@ -7024,6 +7030,8 @@ spec:
Ports is a list of records of service ports
If used, every port defined in the service should have an entry in it
items:
+ description: PortStatus represents the error condition
+ of a service port
properties:
error:
description: |-
@@ -7247,7 +7255,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -7258,7 +7265,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -7328,6 +7334,8 @@ spec:
description: |-
awsElasticBlockStore represents an AWS Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
properties:
fsType:
@@ -7359,8 +7367,10 @@ spec:
- volumeID
type: object
azureDisk:
- description: azureDisk represents an Azure Data Disk mount on
- the host and bind mount to the pod.
+ description: |-
+ azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ are redirected to the disk.csi.azure.com CSI driver.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode: None,
@@ -7398,8 +7408,10 @@ spec:
- diskURI
type: object
azureFile:
- description: azureFile represents an Azure File Service mount
- on the host and bind mount to the pod.
+ description: |-
+ azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ are redirected to the file.csi.azure.com CSI driver.
properties:
readOnly:
description: |-
@@ -7418,8 +7430,9 @@ spec:
- shareName
type: object
cephfs:
- description: cephFS represents a Ceph FS mount on the host that
- shares a pod's lifetime
+ description: |-
+ cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
properties:
monitors:
description: |-
@@ -7471,6 +7484,8 @@ spec:
cinder:
description: |-
cinder represents a cinder volume attached and mounted on kubelets host machine.
+ Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ are redirected to the cinder.csi.openstack.org CSI driver.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
properties:
fsType:
@@ -7580,8 +7595,7 @@ spec:
x-kubernetes-map-type: atomic
csi:
description: csi (Container Storage Interface) represents ephemeral
- storage that is handled by certain external CSI drivers (Beta
- feature).
+ storage that is handled by certain external CSI drivers.
properties:
driver:
description: |-
@@ -8047,6 +8061,7 @@ spec:
description: |-
flexVolume represents a generic volume resource that is
provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
properties:
driver:
description: driver is the name of the driver to use for
@@ -8092,9 +8107,9 @@ spec:
- driver
type: object
flocker:
- description: flocker represents a Flocker volume attached to
- a kubelet's host machine. This depends on the Flocker control
- service being running
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
properties:
datasetName:
description: |-
@@ -8110,6 +8125,8 @@ spec:
description: |-
gcePersistentDisk represents a GCE Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
properties:
fsType:
@@ -8145,7 +8162,7 @@ spec:
gitRepo:
description: |-
gitRepo represents a git repository at a particular revision.
- DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
into the Pod's container.
properties:
@@ -8169,6 +8186,7 @@ spec:
glusterfs:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
@@ -8228,7 +8246,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
- Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@@ -8378,8 +8396,9 @@ spec:
- claimName
type: object
photonPersistentDisk:
- description: photonPersistentDisk represents a PhotonController
- persistent disk attached and mounted on kubelets host machine
+ description: |-
+ photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
properties:
fsType:
description: |-
@@ -8395,8 +8414,11 @@ spec:
- pdID
type: object
portworxVolume:
- description: portworxVolume represents a portworx volume attached
- and mounted on kubelets host machine
+ description: |-
+ portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ is on.
properties:
fsType:
description: |-
@@ -8761,8 +8783,9 @@ spec:
x-kubernetes-list-type: atomic
type: object
quobyte:
- description: quobyte represents a Quobyte mount on the host
- that shares a pod's lifetime
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
properties:
group:
description: |-
@@ -8801,6 +8824,7 @@ spec:
rbd:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
@@ -8873,8 +8897,9 @@ spec:
- monitors
type: object
scaleIO:
- description: scaleIO represents a ScaleIO persistent volume
- attached and mounted on Kubernetes nodes.
+ description: |-
+ scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
properties:
fsType:
default: xfs
@@ -9006,8 +9031,9 @@ spec:
type: string
type: object
storageos:
- description: storageOS represents a StorageOS volume attached
- and mounted on Kubernetes nodes.
+ description: |-
+ storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
properties:
fsType:
description: |-
@@ -9052,8 +9078,10 @@ spec:
type: string
type: object
vsphereVolume:
- description: vsphereVolume represents a vSphere volume attached
- and mounted on kubelets host machine
+ description: |-
+ vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ are redirected to the csi.vsphere.vmware.com CSI driver.
properties:
fsType:
description: |-
@@ -9148,7 +9176,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now,
- can accomodate itsi etc.. later'
+ can accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -9199,7 +9227,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -9430,7 +9458,7 @@ spec:
type: object
type: array
message:
- description: Auxillary message describing CR status
+ description: Auxiliary message describing CR status
type: string
minPeersJoined:
description: true if the minimum number of search head cluster members
diff --git a/config/crd/bases/enterprise.splunk.com_standalones.yaml b/config/crd/bases/enterprise.splunk.com_standalones.yaml
index 387a408f0..39373448c 100644
--- a/config/crd/bases/enterprise.splunk.com_standalones.yaml
+++ b/config/crd/bases/enterprise.splunk.com_standalones.yaml
@@ -347,7 +347,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -362,7 +361,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -528,7 +526,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -543,7 +540,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -706,7 +702,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -721,7 +716,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -887,7 +881,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -902,7 +895,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1030,7 +1022,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -1079,7 +1071,7 @@ spec:
type: string
type: object
type:
- description: 'Type: enterpriseSecurity for now, can accomodate
+ description: 'Type: enterpriseSecurity for now, can accommodate
itsi etc.. later'
type: string
type: object
@@ -1383,10 +1375,11 @@ spec:
environment variables)
type: string
imagePullPolicy:
- description: 'Sets pull policy for all images (either “Always” or
- the default: “IfNotPresent”)'
+ description: 'Sets pull policy for all images ("Always", "Never",
+ or the default: "IfNotPresent")'
enum:
- Always
+ - Never
- IfNotPresent
type: string
imagePullSecrets:
@@ -2023,13 +2016,12 @@ spec:
type: object
trafficDistribution:
description: |-
- TrafficDistribution offers a way to express preferences for how traffic is
- distributed to Service endpoints. Implementations can use this field as a
- hint, but are not required to guarantee strict adherence. If the field is
- not set, the implementation will apply its default routing strategy. If set
- to "PreferClose", implementations should prioritize endpoints that are
- topologically close (e.g., same zone).
- This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ TrafficDistribution offers a way to express preferences for how traffic
+ is distributed to Service endpoints. Implementations can use this field
+ as a hint, but are not required to guarantee strict adherence. If the
+ field is not set, the implementation will apply its default routing
+ strategy. If set to "PreferClose", implementations should prioritize
+ endpoints that are in the same zone.
type: string
type:
description: |-
@@ -2157,6 +2149,8 @@ spec:
Ports is a list of records of service ports
If used, every port defined in the service should have an entry in it
items:
+ description: PortStatus represents the error condition
+ of a service port
properties:
error:
description: |-
@@ -2497,7 +2491,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -2508,7 +2501,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -2578,6 +2570,8 @@ spec:
description: |-
awsElasticBlockStore represents an AWS Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
properties:
fsType:
@@ -2609,8 +2603,10 @@ spec:
- volumeID
type: object
azureDisk:
- description: azureDisk represents an Azure Data Disk mount on
- the host and bind mount to the pod.
+ description: |-
+ azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ are redirected to the disk.csi.azure.com CSI driver.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode: None,
@@ -2648,8 +2644,10 @@ spec:
- diskURI
type: object
azureFile:
- description: azureFile represents an Azure File Service mount
- on the host and bind mount to the pod.
+ description: |-
+ azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ are redirected to the file.csi.azure.com CSI driver.
properties:
readOnly:
description: |-
@@ -2668,8 +2666,9 @@ spec:
- shareName
type: object
cephfs:
- description: cephFS represents a Ceph FS mount on the host that
- shares a pod's lifetime
+ description: |-
+ cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
properties:
monitors:
description: |-
@@ -2721,6 +2720,8 @@ spec:
cinder:
description: |-
cinder represents a cinder volume attached and mounted on kubelets host machine.
+ Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ are redirected to the cinder.csi.openstack.org CSI driver.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
properties:
fsType:
@@ -2830,8 +2831,7 @@ spec:
x-kubernetes-map-type: atomic
csi:
description: csi (Container Storage Interface) represents ephemeral
- storage that is handled by certain external CSI drivers (Beta
- feature).
+ storage that is handled by certain external CSI drivers.
properties:
driver:
description: |-
@@ -3297,6 +3297,7 @@ spec:
description: |-
flexVolume represents a generic volume resource that is
provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
properties:
driver:
description: driver is the name of the driver to use for
@@ -3342,9 +3343,9 @@ spec:
- driver
type: object
flocker:
- description: flocker represents a Flocker volume attached to
- a kubelet's host machine. This depends on the Flocker control
- service being running
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
properties:
datasetName:
description: |-
@@ -3360,6 +3361,8 @@ spec:
description: |-
gcePersistentDisk represents a GCE Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
properties:
fsType:
@@ -3395,7 +3398,7 @@ spec:
gitRepo:
description: |-
gitRepo represents a git repository at a particular revision.
- DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
into the Pod's container.
properties:
@@ -3419,6 +3422,7 @@ spec:
glusterfs:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
@@ -3478,7 +3482,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
- Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@@ -3628,8 +3632,9 @@ spec:
- claimName
type: object
photonPersistentDisk:
- description: photonPersistentDisk represents a PhotonController
- persistent disk attached and mounted on kubelets host machine
+ description: |-
+ photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
properties:
fsType:
description: |-
@@ -3645,8 +3650,11 @@ spec:
- pdID
type: object
portworxVolume:
- description: portworxVolume represents a portworx volume attached
- and mounted on kubelets host machine
+ description: |-
+ portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ is on.
properties:
fsType:
description: |-
@@ -4011,8 +4019,9 @@ spec:
x-kubernetes-list-type: atomic
type: object
quobyte:
- description: quobyte represents a Quobyte mount on the host
- that shares a pod's lifetime
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
properties:
group:
description: |-
@@ -4051,6 +4060,7 @@ spec:
rbd:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
@@ -4123,8 +4133,9 @@ spec:
- monitors
type: object
scaleIO:
- description: scaleIO represents a ScaleIO persistent volume
- attached and mounted on Kubernetes nodes.
+ description: |-
+ scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
properties:
fsType:
default: xfs
@@ -4256,8 +4267,9 @@ spec:
type: string
type: object
storageos:
- description: storageOS represents a StorageOS volume attached
- and mounted on Kubernetes nodes.
+ description: |-
+ storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
properties:
fsType:
description: |-
@@ -4302,8 +4314,10 @@ spec:
type: string
type: object
vsphereVolume:
- description: vsphereVolume represents a vSphere volume attached
- and mounted on kubelets host machine
+ description: |-
+ vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ are redirected to the csi.vsphere.vmware.com CSI driver.
properties:
fsType:
description: |-
@@ -4387,7 +4401,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now,
- can accomodate itsi etc.. later'
+ can accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -4438,7 +4452,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -4790,7 +4804,7 @@ spec:
jsonPath: .metadata.creationTimestamp
name: Age
type: date
- - description: Auxillary message describing CR status
+ - description: Auxiliary message describing CR status
jsonPath: .status.message
name: Message
type: string
@@ -5108,7 +5122,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5123,7 +5136,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5289,7 +5301,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5304,7 +5315,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5467,7 +5477,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5482,7 +5491,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5648,7 +5656,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5663,7 +5670,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -5791,7 +5797,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -5840,7 +5846,7 @@ spec:
type: string
type: object
type:
- description: 'Type: enterpriseSecurity for now, can accomodate
+ description: 'Type: enterpriseSecurity for now, can accommodate
itsi etc.. later'
type: string
type: object
@@ -6144,10 +6150,11 @@ spec:
environment variables)
type: string
imagePullPolicy:
- description: 'Sets pull policy for all images (either “Always” or
- the default: “IfNotPresent”)'
+ description: 'Sets pull policy for all images ("Always", "Never",
+ or the default: "IfNotPresent")'
enum:
- Always
+ - Never
- IfNotPresent
type: string
imagePullSecrets:
@@ -6784,13 +6791,12 @@ spec:
type: object
trafficDistribution:
description: |-
- TrafficDistribution offers a way to express preferences for how traffic is
- distributed to Service endpoints. Implementations can use this field as a
- hint, but are not required to guarantee strict adherence. If the field is
- not set, the implementation will apply its default routing strategy. If set
- to "PreferClose", implementations should prioritize endpoints that are
- topologically close (e.g., same zone).
- This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ TrafficDistribution offers a way to express preferences for how traffic
+ is distributed to Service endpoints. Implementations can use this field
+ as a hint, but are not required to guarantee strict adherence. If the
+ field is not set, the implementation will apply its default routing
+ strategy. If set to "PreferClose", implementations should prioritize
+ endpoints that are in the same zone.
type: string
type:
description: |-
@@ -6918,6 +6924,8 @@ spec:
Ports is a list of records of service ports
If used, every port defined in the service should have an entry in it
items:
+ description: PortStatus represents the error condition
+ of a service port
properties:
error:
description: |-
@@ -7258,7 +7266,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -7269,7 +7276,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -7339,6 +7345,8 @@ spec:
description: |-
awsElasticBlockStore represents an AWS Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
properties:
fsType:
@@ -7370,8 +7378,10 @@ spec:
- volumeID
type: object
azureDisk:
- description: azureDisk represents an Azure Data Disk mount on
- the host and bind mount to the pod.
+ description: |-
+ azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ are redirected to the disk.csi.azure.com CSI driver.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode: None,
@@ -7409,8 +7419,10 @@ spec:
- diskURI
type: object
azureFile:
- description: azureFile represents an Azure File Service mount
- on the host and bind mount to the pod.
+ description: |-
+ azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ are redirected to the file.csi.azure.com CSI driver.
properties:
readOnly:
description: |-
@@ -7429,8 +7441,9 @@ spec:
- shareName
type: object
cephfs:
- description: cephFS represents a Ceph FS mount on the host that
- shares a pod's lifetime
+ description: |-
+ cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
properties:
monitors:
description: |-
@@ -7482,6 +7495,8 @@ spec:
cinder:
description: |-
cinder represents a cinder volume attached and mounted on kubelets host machine.
+ Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ are redirected to the cinder.csi.openstack.org CSI driver.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
properties:
fsType:
@@ -7591,8 +7606,7 @@ spec:
x-kubernetes-map-type: atomic
csi:
description: csi (Container Storage Interface) represents ephemeral
- storage that is handled by certain external CSI drivers (Beta
- feature).
+ storage that is handled by certain external CSI drivers.
properties:
driver:
description: |-
@@ -8058,6 +8072,7 @@ spec:
description: |-
flexVolume represents a generic volume resource that is
provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
properties:
driver:
description: driver is the name of the driver to use for
@@ -8103,9 +8118,9 @@ spec:
- driver
type: object
flocker:
- description: flocker represents a Flocker volume attached to
- a kubelet's host machine. This depends on the Flocker control
- service being running
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
properties:
datasetName:
description: |-
@@ -8121,6 +8136,8 @@ spec:
description: |-
gcePersistentDisk represents a GCE Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
properties:
fsType:
@@ -8156,7 +8173,7 @@ spec:
gitRepo:
description: |-
gitRepo represents a git repository at a particular revision.
- DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
into the Pod's container.
properties:
@@ -8180,6 +8197,7 @@ spec:
glusterfs:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
@@ -8239,7 +8257,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
- Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@@ -8389,8 +8407,9 @@ spec:
- claimName
type: object
photonPersistentDisk:
- description: photonPersistentDisk represents a PhotonController
- persistent disk attached and mounted on kubelets host machine
+ description: |-
+ photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
properties:
fsType:
description: |-
@@ -8406,8 +8425,11 @@ spec:
- pdID
type: object
portworxVolume:
- description: portworxVolume represents a portworx volume attached
- and mounted on kubelets host machine
+ description: |-
+ portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ is on.
properties:
fsType:
description: |-
@@ -8772,8 +8794,9 @@ spec:
x-kubernetes-list-type: atomic
type: object
quobyte:
- description: quobyte represents a Quobyte mount on the host
- that shares a pod's lifetime
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
properties:
group:
description: |-
@@ -8812,6 +8835,7 @@ spec:
rbd:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
@@ -8884,8 +8908,9 @@ spec:
- monitors
type: object
scaleIO:
- description: scaleIO represents a ScaleIO persistent volume
- attached and mounted on Kubernetes nodes.
+ description: |-
+ scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
properties:
fsType:
default: xfs
@@ -9017,8 +9042,9 @@ spec:
type: string
type: object
storageos:
- description: storageOS represents a StorageOS volume attached
- and mounted on Kubernetes nodes.
+ description: |-
+ storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
properties:
fsType:
description: |-
@@ -9063,8 +9089,10 @@ spec:
type: string
type: object
vsphereVolume:
- description: vsphereVolume represents a vSphere volume attached
- and mounted on kubelets host machine
+ description: |-
+ vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ are redirected to the csi.vsphere.vmware.com CSI driver.
properties:
fsType:
description: |-
@@ -9148,7 +9176,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now,
- can accomodate itsi etc.. later'
+ can accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -9199,7 +9227,7 @@ spec:
type: object
type:
description: 'Type: enterpriseSecurity for now, can
- accomodate itsi etc.. later'
+ accommodate itsi etc.. later'
type: string
type: object
scope:
@@ -9378,7 +9406,7 @@ spec:
type: integer
type: object
message:
- description: Auxillary message describing CR status
+ description: Auxiliary message describing CR status
type: string
phase:
description: current phase of the standalone instances
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index dd0d870ec..21dd480ce 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -10,6 +10,9 @@ resources:
- bases/enterprise.splunk.com_monitoringconsoles.yaml
- bases/enterprise.splunk.com_searchheadclusters.yaml
- bases/enterprise.splunk.com_standalones.yaml
+- bases/enterprise.splunk.com_ingestorclusters.yaml
+- bases/enterprise.splunk.com_queues.yaml
+- bases/enterprise.splunk.com_objectstorages.yaml
#+kubebuilder:scaffold:crdkustomizeresource
@@ -17,26 +20,20 @@ patchesStrategicMerge:
- patches/patch_preserve_unknown_fields.yaml
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
-#- patches/webhook_in_clustermanagers.yaml
-#- patches/webhook_in_clustermasters.yaml
-#- patches/webhook_in_indexerclusters.yaml
-#- patches/webhook_in_licensemasters.yaml
-#- patches/webhook_in_licensemanagers.yaml
-#- patches/webhook_in_monitoringconsoles.yaml
-#- patches/webhook_in_searchheadclusters.yaml
-#- patches/webhook_in_standalones.yaml
+- patches/webhook_in_clustermanagers.yaml
+- patches/webhook_in_clustermasters.yaml
+- patches/webhook_in_indexerclusters.yaml
+- patches/webhook_in_ingestorclusters.yaml
+- patches/webhook_in_licensemasters.yaml
+- patches/webhook_in_licensemanagers.yaml
+- patches/webhook_in_monitoringconsoles.yaml
+- patches/webhook_in_searchheadclusters.yaml
+- patches/webhook_in_standalones.yaml
#+kubebuilder:scaffold:crdkustomizewebhookpatch
-# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
-# patches here are for enabling the CA injection for each CRD
-#- patches/cainjection_in_clustermanagers.yaml
-#- patches/cainjection_in_clustermasters.yaml
-#- patches/cainjection_in_indexerclusters.yaml
-#- patches/cainjection_in_licensemasters.yaml
-#- patches/cainjection_in_licensemanagers.yaml
-#- patches/cainjection_in_monitoringconsoles.yaml
-#- patches/cainjection_in_searchheadclusters.yaml
-#- patches/cainjection_in_standalones.yaml
+# [CERTMANAGER] CA injection is now handled via replacements in config/default/kustomization.yaml
+# The cainjection patches are no longer needed with the new kustomize replacements approach.
+# See: https://github.com/kubernetes-sigs/kubebuilder/blob/v4.5.2/testdata/project-v4/config/default/kustomization.yaml
#+kubebuilder:scaffold:crdkustomizecainjectionpatch
# the following config is for teaching kustomize how to do kustomization for CRDs.
diff --git a/config/crd/patches/cainjection_in_ingestorclusters.yaml b/config/crd/patches/cainjection_in_ingestorclusters.yaml
new file mode 100644
index 000000000..77bda7398
--- /dev/null
+++ b/config/crd/patches/cainjection_in_ingestorclusters.yaml
@@ -0,0 +1,7 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+ name: ingestorclusters.enterprise.splunk.com
diff --git a/config/crd/patches/webhook_in_ingestorclusters.yaml b/config/crd/patches/webhook_in_ingestorclusters.yaml
new file mode 100644
index 000000000..3c50a081d
--- /dev/null
+++ b/config/crd/patches/webhook_in_ingestorclusters.yaml
@@ -0,0 +1,16 @@
+# The following patch enables a conversion webhook for the CRD
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: ingestorclusters.enterprise.splunk.com
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
+ conversionReviewVersions:
+ - v1
diff --git a/config/default-with-webhook/kustomization-cluster.yaml b/config/default-with-webhook/kustomization-cluster.yaml
new file mode 100644
index 000000000..c596f0c68
--- /dev/null
+++ b/config/default-with-webhook/kustomization-cluster.yaml
@@ -0,0 +1,137 @@
+# Adds namespace to all resources.
+# Cluster-scoped deployment WITH webhook enabled (opt-in)
+# Requires cert-manager to be installed in the cluster
+namespace: splunk-operator
+
+# Value of this field is prepended to the
+# names of all resources, e.g. a deployment named
+# "wordpress" becomes "alices-wordpress".
+# Note that it should also match with the prefix (text before '-') of the namespace
+# field above.
+namePrefix: splunk-operator-
+
+# Labels to add to all resources and selectors.
+commonLabels:
+ name: splunk-operator
+
+bases:
+- ../crd
+- ../rbac
+- ../persistent-volume
+- ../service
+- ../manager
+# [WEBHOOK] Enabled for opt-in webhook deployment
+- ../webhook
+# [CERTMANAGER] Required for webhook TLS
+- ../certmanager
+# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
+#- ../prometheus
+# [METRICS] Expose the controller manager metrics service.
+- metrics_service.yaml
+
+patchesStrategicMerge:
+# Mount the controller config file for loading manager configurations
+# through a ComponentConfig type
+#- manager_config_patch.yaml
+
+# [WEBHOOK] Enabled for webhook deployment
+- manager_webhook_patch.yaml
+
+# [CERTMANAGER] Enabled for CA injection in the admission webhooks
+- webhookcainjection_patch.yaml
+
+# the following config is for teaching kustomize how to do var substitution
+vars:
+# [CERTMANAGER] Variables for cert-manager CA injection
+- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
+ objref:
+ kind: Certificate
+ group: cert-manager.io
+ version: v1
+ name: serving-cert # this name should match the one in certificate.yaml
+ fieldref:
+ fieldpath: metadata.namespace
+- name: CERTIFICATE_NAME
+ objref:
+ kind: Certificate
+ group: cert-manager.io
+ version: v1
+ name: serving-cert # this name should match the one in certificate.yaml
+- name: SERVICE_NAMESPACE # namespace of the service
+ objref:
+ kind: Service
+ version: v1
+ name: webhook-service
+ fieldref:
+ fieldpath: metadata.namespace
+- name: SERVICE_NAME
+ objref:
+ kind: Service
+ version: v1
+ name: webhook-service
+
+#patches:
+#- target:
+# kind: Deployment
+# name: controller-manager
+# patch: |-
+# - op: replace
+# path: /metadata/name
+# value: splunk-operator
+#- target:
+# kind: ServiceAccount
+# name: controller-manager
+# patch: |-
+# - op: replace
+# path: /metadata/name
+# value: splunk-operator
+#- target:
+# kind: Service
+# name: controller-manager-service
+# patch: |-
+# - op: replace
+# path: /metadata/name
+# value: splunk-operator-service
+#- target:
+# kind: Role
+# name: manager-role
+# patch: |-
+# - op: replace
+# path: /metadata/name
+# value: splunk:operator:namespace-manager
+#- target:
+# kind: RoleBinding
+# name: manager-rolebinding
+# patch: |-
+# - op: replace
+# path: /metadata/name
+# value: splunk:operator:namespace-manager
+
+# currently patch is set to change deployment environment variables
+patches:
+- target:
+ kind: Deployment
+ name: controller-manager
+ patch: |-
+ - op: add
+ path: /spec/template/spec/containers/0/env
+ value:
+ - name: WATCH_NAMESPACE
+ value: WATCH_NAMESPACE_VALUE
+ - name: RELATED_IMAGE_SPLUNK_ENTERPRISE
+ value: SPLUNK_ENTERPRISE_IMAGE
+ - name: OPERATOR_NAME
+ value: splunk-operator
+ - name: SPLUNK_GENERAL_TERMS
+ value: SPLUNK_GENERAL_TERMS_VALUE
+ - name: ENABLE_VALIDATION_WEBHOOK
+ value: "true"
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443.
+# More info: https://book.kubebuilder.io/reference/metrics
+- path: manager_metrics_patch.yaml
+ target:
+ kind: Deployment
diff --git a/config/default-with-webhook/kustomization-namespace.yaml b/config/default-with-webhook/kustomization-namespace.yaml
new file mode 100644
index 000000000..193791601
--- /dev/null
+++ b/config/default-with-webhook/kustomization-namespace.yaml
@@ -0,0 +1,139 @@
+# Adds namespace to all resources.
+# Namespace-scoped deployment WITH webhook enabled (opt-in)
+# Requires cert-manager to be installed in the cluster
+namespace: splunk-operator
+
+# Value of this field is prepended to the
+# names of all resources, e.g. a deployment named
+# "wordpress" becomes "alices-wordpress".
+# Note that it should also match with the prefix (text before '-') of the namespace
+# field above.
+namePrefix: splunk-operator-
+
+# Labels to add to all resources and selectors.
+commonLabels:
+ name: splunk-operator
+
+bases:
+- ../crd
+- ../rbac
+- ../persistent-volume
+- ../service
+- ../manager
+# [WEBHOOK] Enabled for opt-in webhook deployment
+- ../webhook
+# [CERTMANAGER] Required for webhook TLS
+- ../certmanager
+# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
+#- ../prometheus
+# [METRICS] Expose the controller manager metrics service.
+- metrics_service.yaml
+
+patchesStrategicMerge:
+# Mount the controller config file for loading manager configurations
+# through a ComponentConfig type
+#- manager_config_patch.yaml
+
+# [WEBHOOK] Enabled for webhook deployment
+- manager_webhook_patch.yaml
+
+# [CERTMANAGER] Enabled for CA injection in the admission webhooks
+- webhookcainjection_patch.yaml
+
+# the following config is for teaching kustomize how to do var substitution
+vars:
+# [CERTMANAGER] Variables for cert-manager CA injection
+- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
+ objref:
+ kind: Certificate
+ group: cert-manager.io
+ version: v1
+ name: serving-cert # this name should match the one in certificate.yaml
+ fieldref:
+ fieldpath: metadata.namespace
+- name: CERTIFICATE_NAME
+ objref:
+ kind: Certificate
+ group: cert-manager.io
+ version: v1
+ name: serving-cert # this name should match the one in certificate.yaml
+- name: SERVICE_NAMESPACE # namespace of the service
+ objref:
+ kind: Service
+ version: v1
+ name: webhook-service
+ fieldref:
+ fieldpath: metadata.namespace
+- name: SERVICE_NAME
+ objref:
+ kind: Service
+ version: v1
+ name: webhook-service
+
+#patches:
+#- target:
+# kind: Deployment
+# name: controller-manager
+# patch: |-
+# - op: replace
+# path: /metadata/name
+# value: splunk-operator
+#- target:
+# kind: ServiceAccount
+# name: controller-manager
+# patch: |-
+# - op: replace
+# path: /metadata/name
+# value: splunk-operator
+#- target:
+# kind: Service
+# name: controller-manager-service
+# patch: |-
+# - op: replace
+# path: /metadata/name
+# value: splunk-operator-service
+#- target:
+# kind: Role
+# name: manager-role
+# patch: |-
+# - op: replace
+# path: /metadata/name
+# value: splunk:operator:namespace-manager
+#- target:
+# kind: RoleBinding
+# name: manager-rolebinding
+# patch: |-
+# - op: replace
+# path: /metadata/name
+# value: splunk:operator:namespace-manager
+
+# currently patch is set to change deployment environment variables
+patches:
+- target:
+ kind: Deployment
+ name: controller-manager
+ patch: |-
+ - op: add
+ path: /spec/template/spec/containers/0/env
+ value:
+ - name: WATCH_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: RELATED_IMAGE_SPLUNK_ENTERPRISE
+ value: SPLUNK_ENTERPRISE_IMAGE
+ - name: OPERATOR_NAME
+ value: splunk-operator
+ - name: SPLUNK_GENERAL_TERMS
+ value: SPLUNK_GENERAL_TERMS_VALUE
+ - name: ENABLE_VALIDATION_WEBHOOK
+ value: "true"
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443.
+# More info: https://book.kubebuilder.io/reference/metrics
+- path: manager_metrics_patch.yaml
+ target:
+ kind: Deployment
diff --git a/config/default-with-webhook/kustomization.yaml b/config/default-with-webhook/kustomization.yaml
new file mode 100644
index 000000000..5ba87fec1
--- /dev/null
+++ b/config/default-with-webhook/kustomization.yaml
@@ -0,0 +1,137 @@
+# Adds namespace to all resources.
+# Cluster-scoped deployment WITH webhook enabled (opt-in)
+# Requires cert-manager to be installed in the cluster
+namespace: splunk-operator
+
+# Value of this field is prepended to the
+# names of all resources, e.g. a deployment named
+# "wordpress" becomes "alices-wordpress".
+# Note that it should also match with the prefix (text before '-') of the namespace
+# field above.
+namePrefix: splunk-operator-
+
+# Labels to add to all resources and selectors.
+commonLabels:
+ name: splunk-operator
+
+bases:
+- ../crd
+- ../rbac
+- ../persistent-volume
+- ../service
+- ../manager
+# [WEBHOOK] Enabled for opt-in webhook deployment
+- ../webhook
+# [CERTMANAGER] Required for webhook TLS
+- ../certmanager
+# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
+#- ../prometheus
+# [METRICS] Expose the controller manager metrics service.
+- metrics_service.yaml
+
+patchesStrategicMerge:
+# Mount the controller config file for loading manager configurations
+# through a ComponentConfig type
+#- manager_config_patch.yaml
+
+# [WEBHOOK] Enabled for webhook deployment
+- manager_webhook_patch.yaml
+
+# [CERTMANAGER] Enabled for CA injection in the admission webhooks
+- webhookcainjection_patch.yaml
+
+# the following config is for teaching kustomize how to do var substitution
+vars:
+# [CERTMANAGER] Variables for cert-manager CA injection
+- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
+ objref:
+ kind: Certificate
+ group: cert-manager.io
+ version: v1
+ name: serving-cert # this name should match the one in certificate.yaml
+ fieldref:
+ fieldpath: metadata.namespace
+- name: CERTIFICATE_NAME
+ objref:
+ kind: Certificate
+ group: cert-manager.io
+ version: v1
+ name: serving-cert # this name should match the one in certificate.yaml
+- name: SERVICE_NAMESPACE # namespace of the service
+ objref:
+ kind: Service
+ version: v1
+ name: webhook-service
+ fieldref:
+ fieldpath: metadata.namespace
+- name: SERVICE_NAME
+ objref:
+ kind: Service
+ version: v1
+ name: webhook-service
+
+#patches:
+#- target:
+# kind: Deployment
+# name: controller-manager
+# patch: |-
+# - op: replace
+# path: /metadata/name
+# value: splunk-operator
+#- target:
+# kind: ServiceAccount
+# name: controller-manager
+# patch: |-
+# - op: replace
+# path: /metadata/name
+# value: splunk-operator
+#- target:
+# kind: Service
+# name: controller-manager-service
+# patch: |-
+# - op: replace
+# path: /metadata/name
+# value: splunk-operator-service
+#- target:
+# kind: Role
+# name: manager-role
+# patch: |-
+# - op: replace
+# path: /metadata/name
+# value: splunk:operator:namespace-manager
+#- target:
+# kind: RoleBinding
+# name: manager-rolebinding
+# patch: |-
+# - op: replace
+# path: /metadata/name
+# value: splunk:operator:namespace-manager
+
+# currently patch is set to change deployment environment variables
+patches:
+- target:
+ kind: Deployment
+ name: controller-manager
+ patch: |-
+ - op: add
+ path: /spec/template/spec/containers/0/env
+ value:
+ - name: WATCH_NAMESPACE
+ value: WATCH_NAMESPACE_VALUE
+ - name: RELATED_IMAGE_SPLUNK_ENTERPRISE
+ value: SPLUNK_ENTERPRISE_IMAGE
+ - name: OPERATOR_NAME
+ value: splunk-operator
+ - name: SPLUNK_GENERAL_TERMS
+ value: SPLUNK_GENERAL_TERMS_VALUE
+ - name: ENABLE_VALIDATION_WEBHOOK
+ value: "true"
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443.
+# More info: https://book.kubebuilder.io/reference/metrics
+- path: manager_metrics_patch.yaml
+ target:
+ kind: Deployment
diff --git a/config/default-with-webhook/manager_metrics_patch.yaml b/config/default-with-webhook/manager_metrics_patch.yaml
new file mode 100644
index 000000000..488f13693
--- /dev/null
+++ b/config/default-with-webhook/manager_metrics_patch.yaml
@@ -0,0 +1,4 @@
+# This patch adds the args to allow exposing the metrics endpoint using HTTPS
+- op: add
+ path: /spec/template/spec/containers/0/args/0
+ value: --metrics-bind-address=:8443
\ No newline at end of file
diff --git a/config/default-with-webhook/manager_webhook_patch.yaml b/config/default-with-webhook/manager_webhook_patch.yaml
new file mode 100644
index 000000000..738de350b
--- /dev/null
+++ b/config/default-with-webhook/manager_webhook_patch.yaml
@@ -0,0 +1,23 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: controller-manager
+ namespace: system
+spec:
+ template:
+ spec:
+ containers:
+ - name: manager
+ ports:
+ - containerPort: 9443
+ name: webhook-server
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /tmp/k8s-webhook-server/serving-certs
+ name: cert
+ readOnly: true
+ volumes:
+ - name: cert
+ secret:
+ defaultMode: 420
+ secretName: webhook-server-cert
diff --git a/config/default-with-webhook/metrics_service.yaml b/config/default-with-webhook/metrics_service.yaml
new file mode 100644
index 000000000..cebb2683b
--- /dev/null
+++ b/config/default-with-webhook/metrics_service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ control-plane: controller-manager
+ app.kubernetes.io/name: controller-manager
+ app.kubernetes.io/managed-by: kustomize
+ name: controller-manager-metrics-service
+ namespace: system
+spec:
+ ports:
+ - name: https
+ port: 8443
+ protocol: TCP
+ targetPort: 8443
+ selector:
+ control-plane: controller-manager
\ No newline at end of file
diff --git a/config/default-with-webhook/webhookcainjection_patch.yaml b/config/default-with-webhook/webhookcainjection_patch.yaml
new file mode 100644
index 000000000..50ca12118
--- /dev/null
+++ b/config/default-with-webhook/webhookcainjection_patch.yaml
@@ -0,0 +1,15 @@
+# This patch adds an annotation to the admission webhook config, and
+# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize.
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ labels:
+ app.kubernetes.io/name: validatingwebhookconfiguration
+ app.kubernetes.io/instance: validating-webhook-configuration
+ app.kubernetes.io/component: webhook
+ app.kubernetes.io/created-by: splunk-operator
+ app.kubernetes.io/part-of: splunk-operator
+ app.kubernetes.io/managed-by: kustomize
+ name: validating-webhook-configuration
+ annotations:
+ cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml
index 15c98e24a..8cf491ac3 100644
--- a/config/default/kustomization.yaml
+++ b/config/default/kustomization.yaml
@@ -1,4 +1,6 @@
# Adds namespace to all resources.
+# Cluster-scoped deployment WITHOUT webhook (default)
+# To enable webhook, use config/default-with-webhook overlay
namespace: splunk-operator
# Value of this field is prepended to the
@@ -18,33 +20,29 @@ bases:
- ../persistent-volume
- ../service
- ../manager
-# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
-# crd/kustomization.yaml
+# [WEBHOOK] To enable webhook, use config/default-with-webhook overlay
#- ../webhook
-# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
+# [CERTMANAGER] Required for webhook TLS
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
# [METRICS] Expose the controller manager metrics service.
- metrics_service.yaml
+# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy.
+# Only Pod(s) running a namespace labeled with 'metrics: enabled' will be able to gather the metrics.
+# Only CR(s) which require webhooks and are applied on namespaces labeled with 'webhook: enabled' will
+# be able to communicate with the Webhook Server.
+#- ../network-policy
-patchesStrategicMerge:
-# Mount the controller config file for loading manager configurations
-# through a ComponentConfig type
-#- manager_config_patch.yaml
-
-# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
-# crd/kustomization.yaml
+# [WEBHOOK] To enable webhook, use config/default-with-webhook overlay
+#patchesStrategicMerge:
#- manager_webhook_patch.yaml
-
-# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
-# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
-# 'CERTMANAGER' needs to be enabled to use ca injection
+# [CERTMANAGER] Enabled for CA injection in the admission webhooks
#- webhookcainjection_patch.yaml
# the following config is for teaching kustomize how to do var substitution
-vars:
-# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
+# [CERTMANAGER] Variables for cert-manager CA injection
+#vars:
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
# objref:
# kind: Certificate
@@ -134,4 +132,10 @@ patches:
# More info: https://book.kubebuilder.io/reference/metrics
- path: manager_metrics_patch.yaml
target:
- kind: Deployment
\ No newline at end of file
+ kind: Deployment
+
+# [WEBHOOK] To enable webhook, use config/default-with-webhook overlay
+#- path: manager_webhook_patch.yaml
+# target:
+# kind: Deployment
+
diff --git a/config/manager/controller_manager_telemetry.yaml b/config/manager/controller_manager_telemetry.yaml
new file mode 100644
index 000000000..2ccc8d264
--- /dev/null
+++ b/config/manager/controller_manager_telemetry.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: manager-telemetry
+data:
+ status: |
+ {
+ "lastTransmission": "",
+ "test": "false",
+ "sokVersion": "3.1.0"
+ }
diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml
index 47f07b0e6..d6116406b 100644
--- a/config/manager/kustomization.yaml
+++ b/config/manager/kustomization.yaml
@@ -1,5 +1,6 @@
resources:
- manager.yaml
+- controller_manager_telemetry.yaml
generatorOptions:
disableNameSuffixHash: true
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
index 3974d02f0..f03f5ec9e 100644
--- a/config/manager/manager.yaml
+++ b/config/manager/manager.yaml
@@ -52,6 +52,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.name
+ ports: []
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
@@ -85,6 +86,7 @@ spec:
volumeMounts:
- mountPath: /opt/splunk/appframework/
name: app-staging
+ # Additional volumeMounts will be added by patches for webhook and metrics certs
serviceAccountName: controller-manager
volumes:
- configMap:
diff --git a/config/network-policy/allow-metrics-traffic.yaml b/config/network-policy/allow-metrics-traffic.yaml
new file mode 100644
index 000000000..7c604ca7b
--- /dev/null
+++ b/config/network-policy/allow-metrics-traffic.yaml
@@ -0,0 +1,26 @@
+# This NetworkPolicy allows ingress traffic
+# from Pods running in namespaces labeled with 'metrics: enabled'. Only Pods in those
+# namespaces are able to gather data from the metrics endpoint.
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ labels:
+ app.kubernetes.io/name: splunk-operator
+ app.kubernetes.io/managed-by: kustomize
+ name: allow-metrics-traffic
+ namespace: system
+spec:
+ podSelector:
+ matchLabels:
+ control-plane: controller-manager
+ policyTypes:
+ - Ingress
+ ingress:
+ # This allows ingress traffic from any namespace with the label metrics: enabled
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ metrics: enabled # Only from namespaces with this label
+ ports:
+ - port: 8443
+ protocol: TCP
diff --git a/config/network-policy/allow-webhook-traffic.yaml b/config/network-policy/allow-webhook-traffic.yaml
new file mode 100644
index 000000000..27d89ad2c
--- /dev/null
+++ b/config/network-policy/allow-webhook-traffic.yaml
@@ -0,0 +1,26 @@
+# This NetworkPolicy allows ingress traffic to your webhook server running
+# as part of the controller-manager from specific namespaces and pods. CR(s) which use webhooks
+# will only work when applied in namespaces labeled with 'webhook: enabled'
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ labels:
+ app.kubernetes.io/name: splunk-operator
+ app.kubernetes.io/managed-by: kustomize
+ name: allow-webhook-traffic
+ namespace: system
+spec:
+ podSelector:
+ matchLabels:
+ control-plane: controller-manager
+ policyTypes:
+ - Ingress
+ ingress:
+ # This allows ingress traffic from any namespace with the label webhook: enabled
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ webhook: enabled # Only from namespaces with this label
+ ports:
+ - port: 443
+ protocol: TCP
diff --git a/config/network-policy/kustomization.yaml b/config/network-policy/kustomization.yaml
new file mode 100644
index 000000000..0872bee12
--- /dev/null
+++ b/config/network-policy/kustomization.yaml
@@ -0,0 +1,3 @@
+resources:
+- allow-webhook-traffic.yaml
+- allow-metrics-traffic.yaml
diff --git a/config/prometheus/kustomization.yaml b/config/prometheus/kustomization.yaml
index ed137168a..fdc5481b1 100644
--- a/config/prometheus/kustomization.yaml
+++ b/config/prometheus/kustomization.yaml
@@ -1,2 +1,11 @@
resources:
- monitor.yaml
+
+# [PROMETHEUS-WITH-CERTS] The following patch configures the ServiceMonitor in ../prometheus
+# to securely reference certificates created and managed by cert-manager.
+# Additionally, ensure that you uncomment the [METRICS WITH CERTMANAGER] patch under config/default/kustomization.yaml
+# to mount the "metrics-server-cert" secret in the Manager Deployment.
+#patches:
+# - path: monitor_tls_patch.yaml
+# target:
+# kind: ServiceMonitor
diff --git a/config/prometheus/monitor_tls_patch.yaml b/config/prometheus/monitor_tls_patch.yaml
new file mode 100644
index 000000000..5bf84ce0d
--- /dev/null
+++ b/config/prometheus/monitor_tls_patch.yaml
@@ -0,0 +1,19 @@
+# Patch for Prometheus ServiceMonitor to enable secure TLS configuration
+# using certificates managed by cert-manager
+- op: replace
+ path: /spec/endpoints/0/tlsConfig
+ value:
+ # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize
+ serverName: SERVICE_NAME.SERVICE_NAMESPACE.svc
+ insecureSkipVerify: false
+ ca:
+ secret:
+ name: metrics-server-cert
+ key: ca.crt
+ cert:
+ secret:
+ name: metrics-server-cert
+ key: tls.crt
+ keySecret:
+ name: metrics-server-cert
+ key: tls.key
diff --git a/config/rbac/ingestorcluster_editor_role.yaml b/config/rbac/ingestorcluster_editor_role.yaml
new file mode 100644
index 000000000..7faa1e8bb
--- /dev/null
+++ b/config/rbac/ingestorcluster_editor_role.yaml
@@ -0,0 +1,30 @@
+# This rule is not used by the project splunk-operator itself.
+# It is provided to allow the cluster admin to help manage permissions for users.
+#
+# Grants permissions to create, update, and delete resources within the enterprise.splunk.com.
+# This role is intended for users who need to manage these resources
+# but should not control RBAC or manage permissions for others.
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: ingestorcluster-editor-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters/status
+ verbs:
+ - get
diff --git a/config/rbac/ingestorcluster_viewer_role.yaml b/config/rbac/ingestorcluster_viewer_role.yaml
new file mode 100644
index 000000000..e02ffe8f4
--- /dev/null
+++ b/config/rbac/ingestorcluster_viewer_role.yaml
@@ -0,0 +1,26 @@
+# This rule is not used by the project splunk-operator itself.
+# It is provided to allow the cluster admin to help manage permissions for users.
+#
+# Grants read-only access to enterprise.splunk.com resources.
+# This role is intended for users who need visibility into these resources
+# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing.
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: ingestorcluster-viewer-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters/status
+ verbs:
+ - get
diff --git a/config/rbac/objectstorage_editor_role.yaml b/config/rbac/objectstorage_editor_role.yaml
new file mode 100644
index 000000000..70323227f
--- /dev/null
+++ b/config/rbac/objectstorage_editor_role.yaml
@@ -0,0 +1,30 @@
+# This rule is not used by the project splunk-operator itself.
+# It is provided to allow the cluster admin to help manage permissions for users.
+#
+# Grants permissions to create, update, and delete resources within the enterprise.splunk.com.
+# This role is intended for users who need to manage these resources
+# but should not control RBAC or manage permissions for others.
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: objectstorage-editor-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages/status
+ verbs:
+ - get
diff --git a/config/rbac/objectstorage_viewer_role.yaml b/config/rbac/objectstorage_viewer_role.yaml
new file mode 100644
index 000000000..9764699bc
--- /dev/null
+++ b/config/rbac/objectstorage_viewer_role.yaml
@@ -0,0 +1,26 @@
+# This rule is not used by the project splunk-operator itself.
+# It is provided to allow the cluster admin to help manage permissions for users.
+#
+# Grants read-only access to enterprise.splunk.com resources.
+# This role is intended for users who need visibility into these resources
+# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing.
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: objectstorage-viewer-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages/status
+ verbs:
+ - get
diff --git a/config/rbac/queue_editor_role.yaml b/config/rbac/queue_editor_role.yaml
new file mode 100644
index 000000000..bf7e4d890
--- /dev/null
+++ b/config/rbac/queue_editor_role.yaml
@@ -0,0 +1,30 @@
+# This rule is not used by the project splunk-operator itself.
+# It is provided to allow the cluster admin to help manage permissions for users.
+#
+# Grants permissions to create, update, and delete resources within the enterprise.splunk.com.
+# This role is intended for users who need to manage these resources
+# but should not control RBAC or manage permissions for others.
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: queue-editor-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues/status
+ verbs:
+ - get
diff --git a/config/rbac/queue_viewer_role.yaml b/config/rbac/queue_viewer_role.yaml
new file mode 100644
index 000000000..b186c8650
--- /dev/null
+++ b/config/rbac/queue_viewer_role.yaml
@@ -0,0 +1,26 @@
+# This rule is not used by the project splunk-operator itself.
+# It is provided to allow the cluster admin to help manage permissions for users.
+#
+# Grants read-only access to enterprise.splunk.com resources.
+# This role is intended for users who need visibility into these resources
+# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing.
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: queue-viewer-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues/status
+ verbs:
+ - get
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 1bbc2427e..7873f18e1 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -50,9 +50,12 @@ rules:
- clustermanagers
- clustermasters
- indexerclusters
+ - ingestorclusters
- licensemanagers
- licensemasters
- monitoringconsoles
+ - objectstorages
+ - queues
- searchheadclusters
- standalones
verbs:
@@ -69,9 +72,12 @@ rules:
- clustermanagers/finalizers
- clustermasters/finalizers
- indexerclusters/finalizers
+ - ingestorclusters/finalizers
- licensemanagers/finalizers
- licensemasters/finalizers
- monitoringconsoles/finalizers
+ - objectstorages/finalizers
+ - queues/finalizers
- searchheadclusters/finalizers
- standalones/finalizers
verbs:
@@ -82,9 +88,12 @@ rules:
- clustermanagers/status
- clustermasters/status
- indexerclusters/status
+ - ingestorclusters/status
- licensemanagers/status
- licensemasters/status
- monitoringconsoles/status
+ - objectstorages/status
+ - queues/status
- searchheadclusters/status
- standalones/status
verbs:
diff --git a/config/samples/enterprise_v4_ingestorcluster.yaml b/config/samples/enterprise_v4_ingestorcluster.yaml
new file mode 100644
index 000000000..2d022fd99
--- /dev/null
+++ b/config/samples/enterprise_v4_ingestorcluster.yaml
@@ -0,0 +1,8 @@
+apiVersion: enterprise.splunk.com/v4
+kind: IngestorCluster
+metadata:
+ name: ingestorcluster-sample
+ finalizers:
+ - "enterprise.splunk.com/delete-pvc"
+spec: {}
+# TODO(user): Add fields here
diff --git a/config/samples/enterprise_v4_objectstorage.yaml b/config/samples/enterprise_v4_objectstorage.yaml
new file mode 100644
index 000000000..b693a14e0
--- /dev/null
+++ b/config/samples/enterprise_v4_objectstorage.yaml
@@ -0,0 +1,8 @@
+apiVersion: enterprise.splunk.com/v4
+kind: ObjectStorage
+metadata:
+ name: objectstorage-sample
+ finalizers:
+ - "enterprise.splunk.com/delete-pvc"
+spec: {}
+# TODO(user): Add fields here
diff --git a/config/samples/enterprise_v4_queue.yaml b/config/samples/enterprise_v4_queue.yaml
new file mode 100644
index 000000000..374d4adb2
--- /dev/null
+++ b/config/samples/enterprise_v4_queue.yaml
@@ -0,0 +1,8 @@
+apiVersion: enterprise.splunk.com/v4
+kind: Queue
+metadata:
+ name: queue-sample
+ finalizers:
+ - "enterprise.splunk.com/delete-pvc"
+spec: {}
+# TODO(user): Add fields here
diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml
index 73c6d3649..34c05ab05 100644
--- a/config/samples/kustomization.yaml
+++ b/config/samples/kustomization.yaml
@@ -13,4 +13,7 @@ resources:
- enterprise_v4_searchheadcluster.yaml
- enterprise_v4_clustermanager.yaml
- enterprise_v4_licensemanager.yaml
+- enterprise_v4_ingestorcluster.yaml
+- enterprise_v4_queue.yaml
+- enterprise_v4_objectstorage.yaml
#+kubebuilder:scaffold:manifestskustomizesamples
diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml
new file mode 100644
index 000000000..9cf26134e
--- /dev/null
+++ b/config/webhook/kustomization.yaml
@@ -0,0 +1,6 @@
+resources:
+- manifests.yaml
+- service.yaml
+
+configurations:
+- kustomizeconfig.yaml
diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml
new file mode 100644
index 000000000..e809f7820
--- /dev/null
+++ b/config/webhook/kustomizeconfig.yaml
@@ -0,0 +1,18 @@
+# the following config is for teaching kustomize where to look at when substituting vars.
+# It requires kustomize v2.1.0 or newer to work properly.
+nameReference:
+- kind: Service
+ version: v1
+ fieldSpecs:
+ - kind: ValidatingWebhookConfiguration
+ group: admissionregistration.k8s.io
+ path: webhooks/clientConfig/service/name
+
+namespace:
+- kind: ValidatingWebhookConfiguration
+ group: admissionregistration.k8s.io
+ path: webhooks/clientConfig/service/namespace
+ create: true
+
+varReference:
+- path: metadata/annotations
diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml
new file mode 100644
index 000000000..f534bd66b
--- /dev/null
+++ b/config/webhook/manifests.yaml
@@ -0,0 +1,31 @@
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ name: validating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: webhook-service
+ namespace: system
+ path: /validate
+ failurePolicy: Fail
+ name: vsplunk.enterprise.splunk.com
+ rules:
+ - apiGroups:
+ - enterprise.splunk.com
+ apiVersions:
+ - v4
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - standalones
+ - indexerclusters
+ - searchheadclusters
+ - clustermanagers
+ - licensemanagers
+ - monitoringconsoles
+ sideEffects: None
diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml
new file mode 100644
index 000000000..567677934
--- /dev/null
+++ b/config/webhook/service.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: webhook-service
+ namespace: system
+spec:
+ ports:
+ - port: 443
+ protocol: TCP
+ targetPort: 9443
+ selector:
+ control-plane: controller-manager
diff --git a/docs/AppFramework.md b/docs/AppFramework.md
index 9424f8dca..2ac19fef0 100644
--- a/docs/AppFramework.md
+++ b/docs/AppFramework.md
@@ -28,6 +28,7 @@ nav_order: 2
- [App Framework Fields](#description-of-app-framework-specification-fields)
- [App Framework Examples](#examples-of-app-framework-usage)
- [Standalone](#how-to-use-the-app-framework-on-a-standalone-cr)
+ - [Ingestor Cluster](#how-to-use-the-app-framework-on-ingestor-cluster)
- [Cluster Manager](#how-to-use-the-app-framework-on-indexer-cluster)
- [Search Head Cluster](#how-to-use-the-app-framework-on-search-head-cluster)
- [Multiple Scopes](#how-to-install-apps-for-both-local-and-cluster-scopes)
@@ -819,11 +820,11 @@ Copy your Splunk App or Add-on archive files to the unique folders on the remote
## Description of App Framework Specification fields
-The App Framework configuration is supported on the following Custom Resources: Standalone, ClusterManager, SearchHeadCluster, MonitoringConsole and LicenseManager. Configuring the App framework requires:
+The App Framework configuration is supported on the following Custom Resources: Standalone, IngestorCluster, ClusterManager, SearchHeadCluster, MonitoringConsole and LicenseManager. Configuring the App framework requires:
* Remote Source of Apps: Define the remote storage location, including unique folders, and the path to each folder.
* Destination of Apps: Define which Custom Resources need to be configured.
-* Scope of Apps: Define if the apps need to be installed and run locally (such as Standalone, Monitoring Console and License Manager,) or cluster-wide (such as Indexer Cluster, and Search Head Cluster.)
+* Scope of Apps: Define if the apps need to be installed and run locally (such as Standalone, Monitoring Console, License Manager and Ingestor Cluster) or cluster-wide (such as Indexer Cluster, and Search Head Cluster.)
Here is a typical App framework configuration in a Custom Resource definition:
@@ -938,6 +939,7 @@ NOTE: If an app source name needs to be changed, make sure the name change is pe
| Standalone | local | Yes | $SPLUNK_HOME/etc/apps | N/A |
| LicenseManager | local | Yes | $SPLUNK_HOME/etc/apps | N/A |
| MonitoringConsole | local | Yes | $SPLUNK_HOME/etc/apps | N/A |
+ | IngestorCluster | local | Yes | $SPLUNK_HOME/etc/apps | N/A |
| IndexerCluster | N/A | No | N/A | $SPLUNK_HOME/etc/peer-apps |
* `volume` refers to the remote storage volume name configured under the `volumes` stanza (see previous section.)
@@ -1015,6 +1017,69 @@ volumes:
Apply the Custom Resource specification: `kubectl apply -f Standalone.yaml`
+### How to use the App Framework on Ingestor Cluster
+
+In this example, you'll deploy an Ingestor Cluster with a remote storage volume, the location of the app archive, and set the installation location for the Splunk Enterprise Pod instance by using `scope`.
+
+Example using s3: IngestorCluster.yaml
+
+```yaml
+apiVersion: enterprise.splunk.com/v4
+kind: IngestorCluster
+metadata:
+ name: ic
+ finalizers:
+ - enterprise.splunk.com/delete-pvc
+spec:
+ replicas: 1
+ appRepo:
+ appsRepoPollIntervalSeconds: 600
+ defaults:
+ volumeName: volume_app_repo
+ scope: local
+ appSources:
+ - name: networkApps
+ location: networkAppsLoc/
+ - name: authApps
+ location: authAppsLoc/
+ volumes:
+ - name: volume_app_repo
+ storageType: s3
+ provider: aws
+ path: bucket-app-framework/IngestorCluster-us/
+ endpoint: https://s3-us-west-2.amazonaws.com
+ region: us-west-2
+ secretRef: s3-secret
+```
+
+Volume variants for other providers (replace only the volumes stanza):
+
+Azure Blob volumes snippet:
+
+```yaml
+volumes:
+ - name: volume_app_repo
+ storageType: blob
+ provider: azure
+ path: bucket-app-framework/IngestorCluster-us/
+ endpoint: https://mystorageaccount.blob.core.windows.net
+ secretRef: azureblob-secret
+```
+
+GCP GCS volumes snippet:
+
+```yaml
+volumes:
+ - name: volume_app_repo
+ storageType: gcs
+ provider: gcp
+ path: bucket-app-framework/IngestorCluster-us/
+ endpoint: https://storage.googleapis.com
+ secretRef: gcs-secret
+```
+
+Apply the Custom Resource specification: `kubectl apply -f IngestorCluster.yaml`
+
### How to use the App Framework on Indexer Cluster
This example describes the installation of apps on an Indexer Cluster and Cluster Manager. This is achieved by deploying a ClusterManager CR with a remote storage volume, setting the location of the app archives, and the installation scope to support both local and cluster app path distribution.
diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md
index 0d518988e..7e3c7531f 100644
--- a/docs/CONTRIBUTING.md
+++ b/docs/CONTRIBUTING.md
@@ -18,6 +18,7 @@ This document is the single source of truth on contributing towards this codebas
- [Bug reports and feature requests](#bug-reports-and-feature-requests)
- [Fixing issues](#fixing-issues)
- [Pull requests](#pull-requests)
+ - [Maintainer Workflow for External Contributions](#maintainer-workflow-for-external-contributions)
- [Code Review](#code-review)
- [Testing](#testing)
- [Documentation](#documentation)
@@ -87,6 +88,58 @@ To make a pull request against this project:
```
1. Submit a pull request through the GitHub website using the changes from your forked codebase
+#### Maintainer Workflow for External Contributions
+
+When a pull request is submitted from a forked repository (external contributor), the CI/CD pipeline running on the fork will not have access to our internal test infrastructure. To properly test these contributions, maintainers should follow this workflow:
+
+1. **Initial Review**: Before running any code on internal infrastructure, thoroughly review the pull request to verify it is safe to execute. Check for:
+ - **Security concerns**: No malicious code, credential harvesting, or unauthorized access attempts
+ - **Resource safety**: No code that could damage or overload internal infrastructure
+ - **Code quality**: Adherence to project standards and coding conventions
+ - **Overall approach**: Changes align with project goals and architecture
+
+ **Only proceed to the next step if you are confident the changes are safe to run on internal infrastructure.**
+
+2. **Pull and Push to Internal Branch**: Once the initial review is satisfactory and you want to run the full test suite:
+ ```bash
+ # Add the contributor's fork as a remote (one-time setup per contributor)
+ $ git remote add contributor-name https://github.com/CONTRIBUTOR_USERNAME/splunk-operator.git
+
+ # Fetch the contributor's branch
+ $ git fetch contributor-name their-branch-name
+
+ # Create a new branch in the main repository based on their work
+ $ git checkout -b external/contributor-name/their-branch-name contributor-name/their-branch-name
+
+ # Push to the main repository
+ $ git push origin external/contributor-name/their-branch-name
+ ```
+
+3. **Run CI/CD Pipeline**: The pipeline will now run with full access to internal test infrastructure, including:
+ - Integration tests requiring cloud provider credentials
+ - Access to private test clusters
+ - Internal performance testing environments
+
+4. **Review Test Results**: Monitor the pipeline execution and review all test results.
+
+5. **Communicate Findings**: If tests fail or changes are needed:
+ - Comment on the original pull request with detailed feedback
+ - Request changes from the contributor
+ - Once the contributor updates their PR, repeat this process
+
+6. **Merge**: Once all tests pass and the code is approved:
+ - Merge the original pull request from the fork (not the internal branch)
+ - Delete the internal test branch
+ ```bash
+ $ git push origin --delete external/contributor-name/their-branch-name
+ ```
+
+**Important Notes:**
+- Always maintain clear communication with external contributors about the testing process
+- The internal test branch is temporary and should be deleted after the PR is merged or closed
+- This workflow ensures external contributions receive the same level of testing as internal changes
+- Never merge the internal test branch directly; always merge the original PR from the fork
+
#### Code Review
There are two aspects of code review: giving and receiving.
diff --git a/docs/CustomResources.md b/docs/CustomResources.md
index a6f68ba05..328b41d60 100644
--- a/docs/CustomResources.md
+++ b/docs/CustomResources.md
@@ -18,8 +18,11 @@ you can use to manage Splunk Enterprise deployments in your Kubernetes cluster.
- [LicenseManager Resource Spec Parameters](#licensemanager-resource-spec-parameters)
- [Standalone Resource Spec Parameters](#standalone-resource-spec-parameters)
- [SearchHeadCluster Resource Spec Parameters](#searchheadcluster-resource-spec-parameters)
+ - [Queue Resource Spec Parameters](#queue-resource-spec-parameters)
- [ClusterManager Resource Spec Parameters](#clustermanager-resource-spec-parameters)
- [IndexerCluster Resource Spec Parameters](#indexercluster-resource-spec-parameters)
+ - [IngestorCluster Resource Spec Parameters](#ingestorcluster-resource-spec-parameters)
+ - [ObjectStorage Resource Spec Parameters](#objectstorage-resource-spec-parameters)
- [MonitoringConsole Resource Spec Parameters](#monitoringconsole-resource-spec-parameters)
- [Examples of Guaranteed and Burstable QoS](#examples-of-guaranteed-and-burstable-qos)
- [A Guaranteed QoS Class example:](#a-guaranteed-qos-class-example)
@@ -141,7 +144,7 @@ spec:
The following additional configuration parameters may be used for all Splunk
Enterprise resources, including: `Standalone`, `LicenseManager`,
-`SearchHeadCluster`, `ClusterManager` and `IndexerCluster`:
+`SearchHeadCluster`, `ClusterManager`, `IndexerCluster` and `IngestorCluster`:
| Key | Type | Description |
| ------------------ | ------- | ----------------------------------------------------------------------------- |
@@ -278,6 +281,41 @@ spec:
cpu: "4"
```
+## Queue Resource Spec Parameters
+
+```yaml
+apiVersion: enterprise.splunk.com/v4
+kind: Queue
+metadata:
+ name: queue
+spec:
+ replicas: 3
+ provider: sqs
+ sqs:
+ name: sqs-test
+ region: us-west-2
+ endpoint: https://sqs.us-west-2.amazonaws.com
+ dlq: sqs-dlq-test
+```
+
+Queue inputs can be found in the table below. As of now, only SQS provider of message queue is supported.
+
+| Key | Type | Description |
+| ---------- | ------- | ------------------------------------------------- |
+| provider | string | [Required] Provider of message queue (Allowed values: sqs) |
+| sqs | SQS | [Required if provider=sqs] SQS message queue inputs |
+
+SQS message queue inputs can be found in the table below.
+
+| Key | Type | Description |
+| ---------- | ------- | ------------------------------------------------- |
+| name | string | [Required] Name of the queue |
+| region | string | [Required] Region where the queue is located |
+| endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint |
+| dlq | string | [Required] Name of the dead letter queue |
+
+Change of any of the queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed.
+
## ClusterManager Resource Spec Parameters
ClusterManager resource does not have a required spec parameter, but to configure SmartStore, you can specify indexes and volume configuration as below -
```yaml
@@ -328,6 +366,59 @@ the `IndexerCluster` resource provides the following `Spec` configuration parame
| ---------- | ------- | ----------------------------------------------------- |
| replicas | integer | The number of indexer cluster members (minimum of 3, which is the default) |
+## IngestorCluster Resource Spec Parameters
+
+```yaml
+apiVersion: enterprise.splunk.com/v4
+kind: IngestorCluster
+metadata:
+ name: ic
+spec:
+ replicas: 3
+ queueRef:
+ name: queue
+ objectStorageRef:
+ name: os
+```
+Note: `queueRef` and `objectStorageRef` are required fields for the IngestorCluster resource since they are used to connect the IngestorCluster to the Queue and ObjectStorage resources.
+
+In addition to [Common Spec Parameters for All Resources](#common-spec-parameters-for-all-resources)
+and [Common Spec Parameters for All Splunk Enterprise Resources](#common-spec-parameters-for-all-splunk-enterprise-resources),
+the `IngestorCluster` resource provides the following `Spec` configuration parameters:
+
+| Key | Type | Description |
+| ---------- | ------- | ----------------------------------------------------- |
+| replicas | integer | The number of ingestor peers (minimum of 3 which is the default) |
+
+## ObjectStorage Resource Spec Parameters
+
+```yaml
+apiVersion: enterprise.splunk.com/v4
+kind: ObjectStorage
+metadata:
+ name: os
+spec:
+ provider: s3
+ s3:
+ path: ingestion/smartbus-test
+ endpoint: https://s3.us-west-2.amazonaws.com
+```
+
+ObjectStorage inputs can be found in the table below. As of now, only S3 provider of object storage is supported.
+
+| Key | Type | Description |
+| ---------- | ------- | ------------------------------------------------- |
+| provider | string | [Required] Provider of object storage (Allowed values: s3) |
+| s3 | S3 | [Required if provider=s3] S3 object storage inputs |
+
+S3 object storage inputs can be found in the table below.
+
+| Key | Type | Description |
+| ---------- | ------- | ------------------------------------------------- |
+| path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size |
+| endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint |
+
+Change of any of the object storage inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed.
## MonitoringConsole Resource Spec Parameters
@@ -440,9 +531,12 @@ The Splunk Operator controller reconciles every Splunk Enterprise CR. However, t
| Customer Resource Definition | Annotation |
| ----------- | --------- |
+| queue.enterprise.splunk.com | "queue.enterprise.splunk.com/paused" |
| clustermaster.enterprise.splunk.com | "clustermaster.enterprise.splunk.com/paused" |
| clustermanager.enterprise.splunk.com | "clustermanager.enterprise.splunk.com/paused" |
| indexercluster.enterprise.splunk.com | "indexercluster.enterprise.splunk.com/paused" |
+| ingestorcluster.enterprise.splunk.com | "ingestorcluster.enterprise.splunk.com/paused" |
+| objectstorage.enterprise.splunk.com | "objectstorage.enterprise.splunk.com/paused" |
| licensemaster.enterprise.splunk.com | "licensemaster.enterprise.splunk.com/paused" |
| monitoringconsole.enterprise.splunk.com | "monitoringconsole.enterprise.splunk.com/paused" |
| searchheadcluster.enterprise.splunk.com | "searchheadcluster.enterprise.splunk.com/paused" |
@@ -512,6 +606,7 @@ Below is a table listing `app.kubernetes.io/name` values mapped to CRDs
| clustermanager.enterprise.splunk.com | cluster-manager |
| clustermaster.enterprise.splunk.com | cluster-master |
| indexercluster.enterprise.splunk.com | indexer-cluster |
+| ingestorcluster.enterprise.splunk.com | ingestor-cluster |
| licensemanager.enterprise.splunk.com | license-manager |
| licensemaster.enterprise.splunk.com | license-master |
| monitoringconsole.enterprise.splunk.com | monitoring-console |
diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md
new file mode 100644
index 000000000..1a06606fe
--- /dev/null
+++ b/docs/IndexIngestionSeparation.md
@@ -0,0 +1,1159 @@
+---
+title: Index and Ingestion Separation
+parent: Deploy & Configure
+nav_order: 6
+---
+
+# Background
+
+Separation between ingestion and indexing services within Splunk Operator for Kubernetes enables the operator to independently manage the ingestion service while maintaining seamless integration with the indexing service.
+
+This separation enables:
+- Independent scaling: Match resource allocation to ingestion or indexing workload.
+- Data durability: Off‑load buffer management and retry logic to a durable message queue.
+- Operational clarity: Separate monitoring dashboards for ingestion throughput vs indexing latency.
+
+## Splunk Support
+
+These features are supported for Splunk 10.2 and above versions.
+
+# Important Note
+
+> [!WARNING]
+> **For customers deploying SmartBus on CMP, the Splunk Operator for Kubernetes (SOK) manages the configuration and lifecycle of the ingestor tier. The following SOK guide provides implementation details for setting up ingestion separation and integrating with existing indexers. This reference is primarily intended for CMP users leveraging SOK-managed ingestors.**
+
+# Document Variables
+
+- SPLUNK_IMAGE_VERSION: Splunk Enterprise Docker Image version
+
+# Queue
+
+Queue is introduced to store message queue information to be shared among IngestorCluster and IndexerCluster.
+
+## Spec
+
+Queue inputs can be found in the table below. As of now, only SQS provider of message queue is supported.
+
+| Key | Type | Description |
+| ---------- | ------- | ------------------------------------------------- |
+| provider | string | [Required] Provider of message queue (Allowed values: sqs, sqs_cp) |
+| sqs | SQS | [Required if provider=sqs or provider=sqs_cp] SQS message queue inputs |
+
+SQS message queue inputs can be found in the table below.
+
+| Key | Type | Description |
+| ---------- | ------- | ------------------------------------------------- |
+| name | string | [Required] Name of the queue |
+| authRegion | string | [Required] Region where the queue is located |
+| endpoint | string | [Optional, if not provided formed based on authRegion] AWS SQS Service endpoint |
+| dlq | string | [Required] Name of the dead letter queue |
+| volumes | []VolumeSpec | [Optional] List of remote storage volumes used to mount the credentials for queue and bucket access (must contain s3_access_key and s3_secret_key) |
+
+**SOK doesn't support update of any of the Queue inputs except from the volumes which allow the change of secrets.**
+
+## Example
+```
+apiVersion: enterprise.splunk.com/v4
+kind: Queue
+metadata:
+ name: queue
+spec:
+ provider: sqs
+ sqs:
+ name: sqs-test
+ authRegion: us-west-2
+ endpoint: https://sqs.us-west-2.amazonaws.com
+ dlq: sqs-dlq-test
+ volumes:
+ - name: s3-sqs-volume
+ secretRef: s3-secret
+```
+
+# ObjectStorage
+
+ObjectStorage is introduced to store large messages (messages that exceed the size of messages that can be stored in SQS) to be shared among IngestorCluster and IndexerCluster.
+
+## Spec
+
+ObjectStorage inputs can be found in the table below. As of now, only S3 provider of object storage is supported.
+
+| Key | Type | Description |
+| ---------- | ------- | ------------------------------------------------- |
+| provider | string | [Required] Provider of object storage (Allowed values: s3) |
+| s3 | S3 | [Required if provider=s3] S3 object storage inputs |
+
+S3 object storage inputs can be found in the table below.
+
+| Key | Type | Description |
+| ---------- | ------- | ------------------------------------------------- |
+| path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size |
+| endpoint | string | [Optional, if not provided formed based on authRegion] S3-compatible service endpoint |
+
+**SOK doesn't support update of any of the ObjectStorage inputs.**
+
+## Example
+```
+apiVersion: enterprise.splunk.com/v4
+kind: ObjectStorage
+metadata:
+ name: os
+spec:
+ provider: s3
+ s3:
+ path: ingestion/smartbus-test
+ endpoint: https://s3.us-west-2.amazonaws.com
+```
+
+# IngestorCluster
+
+IngestorCluster is introduced for high‑throughput data ingestion into a durable message queue. Its Splunk pods are configured to receive events (outputs.conf) and publish them to a message queue.
+
+## Spec
+
+In addition to common spec inputs, the IngestorCluster resource provides the following Spec configuration parameters.
+
+| Key | Type | Description |
+| ---------- | ------- | ------------------------------------------------- |
+| replicas | integer | The number of replicas (defaults to 3) |
+| queueRef | corev1.ObjectReference | Message queue reference |
+| objectStorageRef | corev1.ObjectReference | Object storage reference |
+
+**SOK doesn't support update of queueRef and objectStorageRef.**
+
+**First provisioning or scaling up the number of replicas requires Ingestor Cluster Splunkd restart, but this restart is implemented automatically and done by SOK.**
+
+## Example
+
+The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the s3-secret credentials allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the ingestion process.
+
+In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The object storage is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly.
+
+```
+apiVersion: enterprise.splunk.com/v4
+kind: IngestorCluster
+metadata:
+ name: ingestor
+ finalizers:
+ - enterprise.splunk.com/delete-pvc
+spec:
+ serviceAccount: ingestor-sa
+ replicas: 3
+ image: splunk/splunk:${SPLUNK_IMAGE_VERSION}
+ queueRef:
+ name: queue
+ objectStorageRef:
+ name: os
+```
+
+# IndexerCluster
+
+IndexerCluster is enhanced to support index‑only mode enabling independent scaling, loss‑safe buffering, and simplified day‑0/day‑n management via Kubernetes CRDs. Its Splunk pods are configured to pull events from the queue (inputs.conf) and index them.
+
+## Spec
+
+In addition to common spec inputs, the IndexerCluster resource provides the following Spec configuration parameters.
+
+| Key | Type | Description |
+| ---------- | ------- | ------------------------------------------------- |
+| replicas | integer | The number of replicas (defaults to 3) |
+| queueRef | corev1.ObjectReference | Message queue reference |
+| objectStorageRef | corev1.ObjectReference | Object storage reference |
+
+**SOK doesn't support update of queueRef and objectStorageRef.**
+
+**First provisioning or scaling up the number of replicas requires Indexer Cluster Splunkd restart, but this restart is implemented automatically and done by SOK.**
+
+## Example
+
+The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the s3-secret credentials allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the indexing process.
+
+In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The object storage is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly.
+
+```
+apiVersion: enterprise.splunk.com/v4
+kind: ClusterManager
+metadata:
+ name: cm
+ finalizers:
+ - enterprise.splunk.com/delete-pvc
+spec:
+ serviceAccount: ingestor-sa
+ image: splunk/splunk:${SPLUNK_IMAGE_VERSION}
+---
+apiVersion: enterprise.splunk.com/v4
+kind: IndexerCluster
+metadata:
+ name: indexer
+ finalizers:
+ - enterprise.splunk.com/delete-pvc
+spec:
+ clusterManagerRef:
+ name: cm
+ serviceAccount: ingestor-sa
+ replicas: 3
+ image: splunk/splunk:${SPLUNK_IMAGE_VERSION}
+ queueRef:
+ name: queue
+ objectStorageRef:
+ name: os
+```
+
+# Common Spec
+
+Common spec values for all SOK Custom Resources can be found in [CustomResources doc](CustomResources.md).
+
+# Helm Charts
+
+Queue, ObjectStorage and IngestorCluster have been added to the splunk/splunk-enterprise Helm chart. IndexerCluster has also been enhanced to support new inputs.
+
+## Example
+
+Below examples describe how to define values for Queue, ObjectStorage, IngestorCluster and IndexerCluster similarly to the above yaml files specifications.
+
+```
+queue:
+ enabled: true
+ name: queue
+ provider: sqs
+ sqs:
+ name: sqs-test
+ authRegion: us-west-2
+ endpoint: https://sqs.us-west-2.amazonaws.com
+ dlq: sqs-dlq-test
+ volumes:
+ - name: s3-sqs-volume
+ secretRef: s3-secret
+```
+
+```
+objectStorage:
+ enabled: true
+ name: os
+ provider: s3
+ s3:
+ endpoint: https://s3.us-west-2.amazonaws.com
+ path: ingestion/smartbus-test
+```
+
+```
+ingestorCluster:
+ enabled: true
+ name: ingestor
+ replicaCount: 3
+ serviceAccount: ingestor-sa
+ queueRef:
+ name: queue
+ objectStorageRef:
+ name: os
+```
+
+```
+clusterManager:
+ enabled: true
+ name: cm
+ replicaCount: 1
+ serviceAccount: ingestor-sa
+
+indexerCluster:
+ enabled: true
+ name: indexer
+ replicaCount: 3
+ serviceAccount: ingestor-sa
+ clusterManagerRef:
+ name: cm
+ queueRef:
+ name: queue
+ objectStorageRef:
+ name: os
+```
+
+# Service Account
+
+To be able to configure ingestion and indexing resources correctly in a secure manner, it is required to provide these resources with a service account that is configured with the minimum set of permissions needed to complete the required operations. With this provided, the right credentials are used by Splunk to perform its tasks.
+
+## Example
+
+The example presented below configures the ingestor-sa service account by using the eksctl utility. It sets up the service account for the ind-ing-sep-demo cluster in region us-west-2 with the AmazonS3FullAccess and AmazonSQSFullAccess access policies.
+
+```
+eksctl create iamserviceaccount \
+ --name ingestor-sa \
+ --cluster ind-ing-sep-demo \
+ --region us-west-2 \
+ --attach-policy-arn arn:aws:iam::aws:policy/AmazonS3FullAccess \
+ --attach-policy-arn arn:aws:iam::aws:policy/AmazonSQSFullAccess \
+ --approve \
+ --override-existing-serviceaccounts
+```
+
+```
+$ kubectl describe sa ingestor-sa
+Name: ingestor-sa
+Namespace: default
+Labels: app.kubernetes.io/managed-by=eksctl
+Annotations: eks.amazonaws.com/role-arn: arn:aws:iam::111111111111:role/eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123
+Image pull secrets:
+Mountable secrets:
+Tokens:
+Events:
+```
+
+```
+$ aws iam get-role --role-name eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123
+{
+ "Role": {
+ "Path": "/",
+ "RoleName": "eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123",
+ "RoleId": "123456789012345678901",
+ "Arn": "arn:aws:iam::111111111111:role/eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123",
+ "CreateDate": "2025-08-07T12:03:31+00:00",
+ "AssumeRolePolicyDocument": {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Federated": "arn:aws:iam::111111111111:oidc-provider/oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901"
+ },
+ "Action": "sts:AssumeRoleWithWebIdentity",
+ "Condition": {
+ "StringEquals": {
+ "oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901:aud": "sts.amazonaws.com",
+ "oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901:sub": "system:serviceaccount:default:ingestor-sa"
+ }
+ }
+ }
+ ]
+ },
+ "Description": "",
+ "MaxSessionDuration": 3600,
+ "Tags": [
+ {
+ "Key": "alpha.eksctl.io/cluster-name",
+ "Value": "ind-ing-sep-demo"
+ },
+ {
+ "Key": "alpha.eksctl.io/iamserviceaccount-name",
+ "Value": "default/ingestor-sa"
+ },
+ {
+ "Key": "alpha.eksctl.io/eksctl-version",
+ "Value": "0.211.0"
+ },
+ {
+ "Key": "eksctl.cluster.k8s.io/v1alpha1/cluster-name",
+ "Value": "ind-ing-sep-demo"
+ }
+ ],
+ "RoleLastUsed": {
+ "LastUsedDate": "2025-08-18T08:47:27+00:00",
+ "Region": "us-west-2"
+ }
+ }
+}
+```
+
+```
+$ aws iam list-attached-role-policies --role-name eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123
+{
+ "AttachedPolicies": [
+ {
+ "PolicyName": "AmazonSQSFullAccess",
+ "PolicyArn": "arn:aws:iam::aws:policy/AmazonSQSFullAccess"
+ },
+ {
+ "PolicyName": "AmazonS3FullAccess",
+ "PolicyArn": "arn:aws:iam::aws:policy/AmazonS3FullAccess"
+ }
+ ]
+}
+```
+
+## Documentation References
+
+- [IAM Roles for Service Accounts on eksctl Docs](https://eksctl.io/usage/iamserviceaccounts/)
+
+# Horizontal Pod Autoscaler
+
+To automatically adjust the number of replicas to serve the ingestion traffic effectively, it is recommended to use the Horizontal Pod Autoscaler, which scales the workload based on actual demand. It enables the user to provide the metrics that are used to decide when to scale down replicas during periods of low traffic, or to add new replicas when the traffic exceeds what the currently running resources can handle.
+
+## Example
+
+The example presented below configures a HorizontalPodAutoscaler named ingestor-hpa that resides in the default namespace (the same namespace as the resources it is managing) to scale the IngestorCluster custom resource named ingestor. With average utilization set to 50, the HorizontalPodAutoscaler resource will try to keep the average utilization of the pods in the scaling target at 50%. It will be able to scale the replicas starting from a minimum of 3 up to a maximum of 10 replicas.
+
+```
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ name: ingestor-hpa
+spec:
+ scaleTargetRef:
+ apiVersion: enterprise.splunk.com/v4
+ kind: IngestorCluster
+ name: ingestor
+ minReplicas: 3
+ maxReplicas: 10
+ metrics:
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: 50
+```
+
+## Documentation References
+
+- [Horizontal Pod Autoscaling on Kubernetes Docs](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/)
+
+# Grafana
+
+In order to monitor the resources, Grafana could be installed and configured on the cluster to present the setup on a dashboard in a series of useful diagrams and metrics.
+
+## Example
+
+In the following example, the dashboard presents ingestion and indexing data in the form of useful diagrams and metrics such as number of replicas or resource consumption.
+
+```
+{
+ "id": null,
+ "uid": "splunk-autoscale",
+ "title": "Splunk Ingestion & Indexer Autoscaling with I/O & PV",
+ "schemaVersion": 27,
+ "version": 12,
+ "refresh": "5s",
+ "time": { "from": "now-30m", "to": "now" },
+ "timezone": "browser",
+ "style": "dark",
+ "tags": ["splunk","autoscale","ingestion","indexer","io","pv"],
+ "graphTooltip": 1,
+ "panels": [
+ { "id": 1, "type": "stat", "title": "Ingestion Replicas", "gridPos": {"x":0,"y":0,"w":4,"h":4}, "targets":[{"expr":"kube_statefulset_replicas{namespace=\"default\",statefulset=\"splunk-ingestor-ingestor\"}"}], "options": {"reduceOptions":{"calcs":["last"]},"orientation":"horizontal","colorMode":"value","graphMode":"none","textMode":"value","thresholds":{"mode":"absolute","steps":[{"value":null,"color":"#73BF69"},{"value":5,"color":"#EAB839"},{"value":8,"color":"#BF1B00"}]}}},
+ { "id": 2, "type": "stat", "title": "Indexer Replicas", "gridPos": {"x":4,"y":0,"w":4,"h":4}, "targets":[{"expr":"kube_statefulset_replicas{namespace=\"default\",statefulset=\"splunk-indexer-indexer\"}"}], "options": {"reduceOptions":{"calcs":["last"]},"orientation":"horizontal","colorMode":"value","graphMode":"none","textMode":"value","thresholds":{"mode":"absolute","steps":[{"value":null,"color":"#73BF69"},{"value":5,"color":"#EAB839"},{"value":8,"color":"#BF1B00"}]}}},
+ { "id": 3, "type": "timeseries","title": "Ingestion CPU (cores)","gridPos": {"x":8,"y":0,"w":8,"h":4},"targets":[{"expr":"sum(rate(container_cpu_usage_seconds_total{namespace=\"default\",pod=~\"splunk-ingestor-ingestor-.*\"}[1m]))","legendFormat":"CPU (cores)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#FFA600"}}},
+ { "id": 4, "type": "timeseries","title": "Ingestion Memory (MiB)","gridPos": {"x":16,"y":0,"w":8,"h":4},"targets":[{"expr":"sum(container_memory_usage_bytes{namespace=\"default\",pod=~\"splunk-ingestor-ingestor-.*\"}) / 1024 / 1024","legendFormat":"Memory (MiB)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#00AF91"}}},
+ { "id": 5, "type": "timeseries","title": "Ingestion Network In (KB/s)","gridPos": {"x":0,"y":8,"w":8,"h":4},"targets":[{"expr":"sum(rate(container_network_receive_bytes_total{namespace=\"default\",pod=~\"splunk-ingestor-ingestor-.*\"}[1m])) / 1024","legendFormat":"Net In (KB/s)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#59A14F"}}},
+ { "id": 6, "type": "timeseries","title": "Ingestion Network Out (KB/s)","gridPos": {"x":8,"y":8,"w":8,"h":4},"targets":[{"expr":"sum(rate(container_network_transmit_bytes_total{namespace=\"default\",pod=~\"splunk-ingestor-ingestor-.*\"}[1m])) / 1024","legendFormat":"Net Out (KB/s)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#E15759"}}},
+ { "id": 7, "type": "timeseries","title": "Indexer CPU (cores)","gridPos": {"x":16,"y":4,"w":8,"h":4},"targets":[{"expr":"sum(rate(container_cpu_usage_seconds_total{namespace=\"default\",pod=~\"splunk-indexer-indexer-.*\"}[1m]))","legendFormat":"CPU (cores)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#7D4E57"}}},
+ { "id":8, "type": "timeseries","title": "Indexer Memory (MiB)","gridPos": {"x":0,"y":12,"w":8,"h":4},"targets":[{"expr":"sum(container_memory_usage_bytes{namespace=\"default\",pod=~\"splunk-indexer-indexer-.*\"}) / 1024 / 1024","legendFormat":"Memory (MiB)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#4E79A7"}}},
+ { "id":9, "type": "timeseries","title": "Indexer Network In (KB/s)","gridPos": {"x":8,"y":12,"w":8,"h":4},"targets":[{"expr":"sum(rate(container_network_receive_bytes_total{namespace=\"default\",pod=~\"splunk-indexer-indexer-.*\"}[1m])) / 1024","legendFormat":"Net In (KB/s)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#9467BD"}}},
+ { "id":10, "type": "timeseries","title": "Indexer Network Out (KB/s)","gridPos": {"x":16,"y":12,"w":8,"h":4},"targets":[{"expr":"sum(rate(container_network_transmit_bytes_total{namespace=\"default\",pod=~\"splunk-indexer-indexer-.*\"}[1m])) / 1024","legendFormat":"Net Out (KB/s)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#8C564B"}}},
+ { "id":11, "type": "timeseries","title": "Ingestion Disk Read (KB/s)","gridPos": {"x":0,"y":16,"w":8,"h":4},"targets":[{"expr":"sum(rate(container_fs_reads_bytes_total{namespace=\"default\",pod=~\"splunk-ingestor-ingestor-.*\"}[1m])) / 1024","legendFormat":"Disk Read (KB/s)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#1F77B4"}}},
+ { "id":12, "type": "timeseries","title": "Ingestion Disk Write (KB/s)","gridPos": {"x":8,"y":16,"w":8,"h":4},"targets":[{"expr":"sum(rate(container_fs_writes_bytes_total{namespace=\"default\",pod=~\"splunk-ingestor-ingestor-.*\"}[1m])) / 1024","legendFormat":"Disk Write (KB/s)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#FF7F0E"}}},
+ { "id":13, "type": "timeseries","title": "Indexer PV Usage (GiB)","gridPos": {"x":0,"y":20,"w":8,"h":4},"targets":[{"expr":"kubelet_volume_stats_used_bytes{namespace=\"default\",persistentvolumeclaim=~\".*-indexer-.*\"} / 1024 / 1024 / 1024","legendFormat":"Used GiB"},{"expr":"kubelet_volume_stats_capacity_bytes{namespace=\"default\",persistentvolumeclaim=~\".*-indexer-.*\"} / 1024 / 1024 / 1024","legendFormat":"Capacity GiB"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"}}},
+ { "id":14, "type": "timeseries","title": "Ingestion PV Usage (GiB)","gridPos": {"x":8,"y":20,"w":8,"h":4},"targets":[{"expr":"kubelet_volume_stats_used_bytes{namespace=\"default\",persistentvolumeclaim=~\".*-ingestor-.*\"} / 1024 / 1024 / 1024","legendFormat":"Used GiB"},{"expr":"kubelet_volume_stats_capacity_bytes{namespace=\"default\",persistentvolumeclaim=~\".*-ingestor-.*\"} / 1024 / 1024 / 1024","legendFormat":"Capacity GiB"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"}}}
+ ]
+}
+```
+
+## Documentation References
+
+- [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack)
+
+# App Installation for Ingestor Cluster Instances
+
+Application installation is supported for Ingestor Cluster instances. However, as of now, applications are installed using local scope and if any application requires Splunk restart, there is no automated way to detect it and trigger automatically via Splunk Operator.
+
+Therefore, to be able to enforce Splunk restart for each of the Ingestor Cluster pods, it is recommended to add/update IngestorCluster CR annotations/labels and apply the new configuration which will trigger the rolling restart of Splunk pods for Ingestor Cluster.
+
+Ideally, an update of annotations and labels should not trigger a pod restart at all; it is under investigation how to prevent this and handle restarts automatically.
+
+# Example
+
+1. Install CRDs and Splunk Operator for Kubernetes.
+
+- SOK_IMAGE_VERSION: version of the image for Splunk Operator for Kubernetes
+
+```
+$ make install
+```
+
+```
+$ kubectl apply -f ${SOK_IMAGE_VERSION}/splunk-operator-cluster.yaml --server-side
+```
+
+```
+$ kubectl get po -n splunk-operator
+NAME READY STATUS RESTARTS AGE
+splunk-operator-controller-manager-785b89d45c-dwfkd 2/2 Running 0 4d3h
+```
+
+2. Create a service account.
+
+```
+$ eksctl create iamserviceaccount \
+ --name ingestor-sa \
+ --cluster ind-ing-sep-demo \
+ --region us-west-2 \
+ --attach-policy-arn arn:aws:iam::aws:policy/AmazonS3FullAccess \
+ --attach-policy-arn arn:aws:iam::aws:policy/AmazonSQSFullAccess \
+ --approve \
+ --override-existing-serviceaccounts
+```
+
+```
+$ kubectl describe sa ingestor-sa
+Name: ingestor-sa
+Namespace: default
+Labels: app.kubernetes.io/managed-by=eksctl
+Annotations: eks.amazonaws.com/role-arn: arn:aws:iam::111111111111:role/eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123
+Image pull secrets:
+Mountable secrets:
+Tokens:
+Events:
+```
+
+```
+$ aws iam get-role --role-name eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123
+{
+ "Role": {
+ "Path": "/",
+ "RoleName": "eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123",
+ "RoleId": "123456789012345678901",
+ "Arn": "arn:aws:iam::111111111111:role/eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123",
+ "CreateDate": "2025-08-07T12:03:31+00:00",
+ "AssumeRolePolicyDocument": {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Federated": "arn:aws:iam::111111111111:oidc-provider/oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901"
+ },
+ "Action": "sts:AssumeRoleWithWebIdentity",
+ "Condition": {
+ "StringEquals": {
+ "oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901:aud": "sts.amazonaws.com",
+ "oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901:sub": "system:serviceaccount:default:ingestor-sa"
+ }
+ }
+ }
+ ]
+ },
+ "Description": "",
+ "MaxSessionDuration": 3600,
+ "Tags": [
+ {
+ "Key": "alpha.eksctl.io/cluster-name",
+ "Value": "ind-ing-sep-demo"
+ },
+ {
+ "Key": "alpha.eksctl.io/iamserviceaccount-name",
+ "Value": "default/ingestor-sa"
+ },
+ {
+ "Key": "alpha.eksctl.io/eksctl-version",
+ "Value": "0.211.0"
+ },
+ {
+ "Key": "eksctl.cluster.k8s.io/v1alpha1/cluster-name",
+ "Value": "ind-ing-sep-demo"
+ }
+ ],
+ "RoleLastUsed": {
+ "LastUsedDate": "2025-08-18T08:47:27+00:00",
+ "Region": "us-west-2"
+ }
+ }
+}
+```
+
+```
+$ aws iam list-attached-role-policies --role-name eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123
+{
+ "AttachedPolicies": [
+ {
+ "PolicyName": "AmazonSQSFullAccess",
+ "PolicyArn": "arn:aws:iam::aws:policy/AmazonSQSFullAccess"
+ },
+ {
+ "PolicyName": "AmazonS3FullAccess",
+ "PolicyArn": "arn:aws:iam::aws:policy/AmazonS3FullAccess"
+ }
+ ]
+}
+```
+
+3. Install Queue resource.
+
+```
+$ cat queue.yaml
+apiVersion: enterprise.splunk.com/v4
+kind: Queue
+metadata:
+ name: queue
+ finalizers:
+ - enterprise.splunk.com/delete-pvc
+spec:
+ provider: sqs
+ sqs:
+ name: sqs-test
+ authRegion: us-west-2
+ endpoint: https://sqs.us-west-2.amazonaws.com
+ dlq: sqs-dlq-test
+```
+
+```
+$ kubectl apply -f queue.yaml
+```
+
+```
+$ kubectl get queue
+NAME PHASE AGE MESSAGE
+queue Ready 20s
+```
+
+```
+kubectl describe queue
+Name: queue
+Namespace: default
+Labels:
+Annotations:
+API Version: enterprise.splunk.com/v4
+Kind: Queue
+Metadata:
+ Creation Timestamp: 2025-10-27T10:25:53Z
+ Finalizers:
+ enterprise.splunk.com/delete-pvc
+ Generation: 1
+ Resource Version: 12345678
+ UID: 12345678-1234-5678-1234-012345678911
+Spec:
+ Sqs:
+ Auth Region: us-west-2
+ DLQ: sqs-dlq-test
+ Endpoint: https://sqs.us-west-2.amazonaws.com
+ Name: sqs-test
+ Provider: sqs
+Status:
+ Message:
+ Phase: Ready
+ Resource Rev Map:
+Events:
+```
+
+4. Install ObjectStorage resource.
+
+```
+$ cat os.yaml
+apiVersion: enterprise.splunk.com/v4
+kind: ObjectStorage
+metadata:
+ name: os
+ finalizers:
+ - enterprise.splunk.com/delete-pvc
+spec:
+ provider: s3
+ s3:
+ endpoint: https://s3.us-west-2.amazonaws.com
+ path: ingestion/smartbus-test
+```
+
+```
+$ kubectl apply -f os.yaml
+```
+
+```
+$ kubectl get os
+NAME PHASE AGE MESSAGE
+os Ready 20s
+```
+
+```
+kubectl describe os
+Name: os
+Namespace: default
+Labels:
+Annotations:
+API Version: enterprise.splunk.com/v4
+Kind: ObjectStorage
+Metadata:
+ Creation Timestamp: 2025-10-27T10:25:53Z
+ Finalizers:
+ enterprise.splunk.com/delete-pvc
+ Generation: 1
+ Resource Version: 12345678
+ UID: 12345678-1234-5678-1234-012345678911
+Spec:
+ S3:
+ Endpoint: https://s3.us-west-2.amazonaws.com
+ Path: ingestion/smartbus-test
+ Provider: s3
+Status:
+ Message:
+ Phase: Ready
+ Resource Rev Map:
+Events:
+```
+
+5. Install IngestorCluster resource.
+
+```
+$ cat ingestor.yaml
+apiVersion: enterprise.splunk.com/v4
+kind: IngestorCluster
+metadata:
+ name: ingestor
+ finalizers:
+ - enterprise.splunk.com/delete-pvc
+spec:
+ serviceAccount: ingestor-sa
+ replicas: 3
+ image: splunk/splunk:${SPLUNK_IMAGE_VERSION}
+ queueRef:
+ name: queue
+ objectStorageRef:
+ name: os
+```
+
+```
+$ kubectl apply -f ingestor.yaml
+```
+
+```
+$ kubectl get po
+NAME READY STATUS RESTARTS AGE
+splunk-ingestor-ingestor-0 1/1 Running 0 2m12s
+splunk-ingestor-ingestor-1 1/1 Running 0 2m12s
+splunk-ingestor-ingestor-2 1/1 Running 0 2m12s
+```
+
+```
+$ kubectl describe ingestorcluster ingestor
+Name: ingestor
+Namespace: default
+Labels:
+Annotations:
+API Version: enterprise.splunk.com/v4
+Kind: IngestorCluster
+Metadata:
+ Creation Timestamp: 2025-08-18T09:49:45Z
+ Generation: 1
+ Resource Version: 12345678
+ UID: 12345678-1234-1234-1234-1234567890123
+Spec:
+ Queue Ref:
+ Name: queue
+ Namespace: default
+ Image: splunk/splunk:${SPLUNK_IMAGE_VERSION}
+ Object Storage Ref:
+ Name: os
+ Namespace: default
+ Replicas: 3
+ Service Account: ingestor-sa
+Status:
+ App Context:
+ App Repo:
+ App Install Period Seconds: 90
+ Defaults:
+ Premium Apps Props:
+ Es Defaults:
+ Install Max Retries: 2
+ Bundle Push Status:
+ Is Deployment In Progress: false
+ Last App Info Check Time: 0
+ Version: 0
+ Credential Secret Version: 33744270
+ Message:
+ Phase: Ready
+ Ready Replicas: 3
+ Replicas: 3
+ Resource Rev Map:
+ Selector: app.kubernetes.io/instance=splunk-ingestor-ingestor
+ Tel App Installed: true
+Events:
+```
+
+```
+$ kubectl exec -it splunk-ingestor-ingestor-0 -- sh
+$ kubectl exec -it splunk-ingestor-ingestor-1 -- sh
+$ kubectl exec -it splunk-ingestor-ingestor-2 -- sh
+sh-4.4$ env | grep AWS
+AWS_DEFAULT_REGION=us-west-2
+AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token
+AWS_REGION=us-west-2
+AWS_ROLE_ARN=arn:aws:iam::111111111111:role/eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123
+AWS_STS_REGIONAL_ENDPOINTS=regional
+sh-4.4$ cat /opt/splunk/etc/system/local/default-mode.conf
+[pipeline:remotequeueruleset]
+disabled = false
+
+[pipeline:ruleset]
+disabled = true
+
+[pipeline:remotequeuetyping]
+disabled = false
+
+[pipeline:remotequeueoutput]
+disabled = false
+
+[pipeline:typing]
+disabled = true
+
+[pipeline:indexerPipe]
+disabled = true
+
+sh-4.4$ cat /opt/splunk/etc/system/local/outputs.conf
+[remote_queue:sqs-test]
+remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4
+remote_queue.sqs_smartbus.auth_region = us-west-2
+remote_queue.sqs_smartbus.dead_letter_queue.name = sqs-dlq-test
+remote_queue.sqs_smartbus.encoding_format = s2s
+remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com
+remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com
+remote_queue.sqs_smartbus.large_message_store.path = s3://ingestion/smartbus-test
+remote_queue.sqs_smartbus.retry_policy = max_count
+remote_queue.sqs_smartbus.send_interval = 5s
+remote_queue.type = sqs_smartbus
+```
+
+6. Install IndexerCluster resource.
+
+```
+$ cat idxc.yaml
+apiVersion: enterprise.splunk.com/v4
+kind: ClusterManager
+metadata:
+ name: cm
+ finalizers:
+ - enterprise.splunk.com/delete-pvc
+spec:
+ image: splunk/splunk:${SPLUNK_IMAGE_VERSION}
+ serviceAccount: ingestor-sa
+---
+apiVersion: enterprise.splunk.com/v4
+kind: IndexerCluster
+metadata:
+ name: indexer
+ finalizers:
+ - enterprise.splunk.com/delete-pvc
+spec:
+ image: splunk/splunk:${SPLUNK_IMAGE_VERSION}
+ replicas: 3
+ clusterManagerRef:
+ name: cm
+ serviceAccount: ingestor-sa
+ queueRef:
+ name: queue
+ objectStorageRef:
+ name: os
+```
+
+```
+$ kubectl apply -f idxc.yaml
+```
+
+```
+$ kubectl get po
+NAME READY STATUS RESTARTS AGE
+splunk-cm-cluster-manager-0 1/1 Running 0 15m
+splunk-indexer-indexer-0 1/1 Running 0 12m
+splunk-indexer-indexer-1 1/1 Running 0 12m
+splunk-indexer-indexer-2 1/1 Running 0 12m
+splunk-ingestor-ingestor-0 1/1 Running 0 27m
+splunk-ingestor-ingestor-1 1/1 Running 0 29m
+splunk-ingestor-ingestor-2 1/1 Running 0 31m
+```
+
+```
+$ kubectl exec -it splunk-indexer-indexer-0 -- sh
+$ kubectl exec -it splunk-indexer-indexer-1 -- sh
+$ kubectl exec -it splunk-indexer-indexer-2 -- sh
+sh-4.4$ env | grep AWS
+AWS_DEFAULT_REGION=us-west-2
+AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token
+AWS_REGION=us-west-2
+AWS_ROLE_ARN=arn:aws:iam::111111111111:role/eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123
+AWS_STS_REGIONAL_ENDPOINTS=regional
+sh-4.4$ cat /opt/splunk/etc/system/local/inputs.conf
+
+[splunktcp://9997]
+disabled = 0
+
+[remote_queue:sqs-test]
+remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4
+remote_queue.sqs_smartbus.auth_region = us-west-2
+remote_queue.sqs_smartbus.dead_letter_queue.name = sqs-dlq-test
+remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com
+remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com
+remote_queue.sqs_smartbus.large_message_store.path = s3://ingestion/smartbus-test
+remote_queue.sqs_smartbus.retry_policy = max_count
+remote_queue.type = sqs_smartbus
+sh-4.4$ cat /opt/splunk/etc/system/local/outputs.conf
+[remote_queue:sqs-test]
+remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4
+remote_queue.sqs_smartbus.auth_region = us-west-2
+remote_queue.sqs_smartbus.dead_letter_queue.name = sqs-dlq-test
+remote_queue.sqs_smartbus.encoding_format = s2s
+remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com
+remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com
+remote_queue.sqs_smartbus.large_message_store.path = s3://ingestion/smartbus-test
+remote_queue.sqs_smartbus.retry_policy = max_count
+remote_queue.sqs_smartbus.send_interval = 5s
+remote_queue.type = sqs_smartbus
+sh-4.4$ cat /opt/splunk/etc/system/local/default-mode.conf
+[pipeline:remotequeueruleset]
+disabled = false
+
+[pipeline:ruleset]
+disabled = true
+
+[pipeline:remotequeuetyping]
+disabled = false
+
+[pipeline:remotequeueoutput]
+disabled = false
+
+[pipeline:typing]
+disabled = true
+```
+
+7. Install Horizontal Pod Autoscaler for IngestorCluster.
+
+```
+$ cat hpa-ing.yaml
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ name: ing-hpa
+spec:
+ scaleTargetRef:
+ apiVersion: enterprise.splunk.com/v4
+ kind: IngestorCluster
+ name: ingestor
+ minReplicas: 3
+ maxReplicas: 10
+ metrics:
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: 50
+```
+
+```
+$ kubectl apply -f hpa-ing.yaml
+```
+
+```
+$ kubectl get hpa
+NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
+ing-hpa IngestorCluster/ingestor cpu: /50% 3 10 0 10s
+```
+
+```
+kubectl top pod
+NAME CPU(cores) MEMORY(bytes)
+hec-locust-load-29270124-f86gj 790m 221Mi
+splunk-cm-cluster-manager-0 154m 1696Mi
+splunk-indexer-indexer-0 107m 1339Mi
+splunk-indexer-indexer-1 187m 1052Mi
+splunk-indexer-indexer-2 203m 1703Mi
+splunk-ingestor-ingestor-0 97m 517Mi
+splunk-ingestor-ingestor-1 64m 585Mi
+splunk-ingestor-ingestor-2 57m 565Mi
+```
+
+```
+$ kubectl get po
+NAME READY STATUS RESTARTS AGE
+hec-locust-load-29270126-szgv2 1/1 Running 0 30s
+splunk-cm-cluster-manager-0 1/1 Running 0 41m
+splunk-indexer-indexer-0 1/1 Running 0 38m
+splunk-indexer-indexer-1 1/1 Running 0 38m
+splunk-indexer-indexer-2 1/1 Running 0 38m
+splunk-ingestor-ingestor-0 1/1 Running 0 53m
+splunk-ingestor-ingestor-1 1/1 Running 0 55m
+splunk-ingestor-ingestor-2 1/1 Running 0 57m
+splunk-ingestor-ingestor-3 0/1 Running 0 116s
+splunk-ingestor-ingestor-4 0/1 Running 0 116s
+```
+
+```
+kubectl top pod
+NAME CPU(cores) MEMORY(bytes)
+hec-locust-load-29270126-szgv2 532m 72Mi
+splunk-cm-cluster-manager-0 91m 1260Mi
+splunk-indexer-indexer-0 112m 865Mi
+splunk-indexer-indexer-1 115m 855Mi
+splunk-indexer-indexer-2 152m 1696Mi
+splunk-ingestor-ingestor-0 115m 482Mi
+splunk-ingestor-ingestor-1 76m 496Mi
+splunk-ingestor-ingestor-2 156m 553Mi
+splunk-ingestor-ingestor-3 355m 846Mi
+splunk-ingestor-ingestor-4 1036m 979Mi
+```
+
+```
+kubectl get hpa
+NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
+ing-hpa IngestorCluster/ingestor cpu: 115%/50% 3 10 10 8m54s
+```
+
+8. Generate fake load.
+
+- HEC_TOKEN: HEC token for making fake calls
+
+```
+$ kubectl get secret splunk-default-secret -o yaml
+apiVersion: v1
+data:
+ hec_token: HEC_TOKEN
+ idxc_secret: YWJjZGVmMTIzNDU2Cg==
+ pass4SymmKey: YWJjZGVmMTIzNDU2Cg==
+ password: YWJjZGVmMTIzNDU2Cg==
+ shc_secret: YWJjZGVmMTIzNDU2Cg==
+kind: Secret
+metadata:
+ creationTimestamp: "2025-08-26T10:15:11Z"
+ name: splunk-default-secret
+ namespace: default
+ ownerReferences:
+ - apiVersion: enterprise.splunk.com/v4
+ controller: false
+ kind: IngestorCluster
+ name: ingestor
+ uid: 12345678-1234-1234-1234-1234567890123
+ - apiVersion: enterprise.splunk.com/v4
+ controller: false
+ kind: ClusterManager
+ name: cm
+ uid: 12345678-1234-1234-1234-1234567890125
+ - apiVersion: enterprise.splunk.com/v4
+ controller: false
+ kind: IndexerCluster
+ name: indexer
+ uid: 12345678-1234-1234-1234-1234567890124
+ resourceVersion: "123456"
+ uid: 12345678-1234-1234-1234-1234567890126
+type: Opaque
+```
+
+```
+$ echo HEC_TOKEN | base64 -d
+HEC_TOKEN
+```
+
+```
+cat loadgen.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: hec-locust-config
+data:
+ requirements.txt: |
+ locust
+ requests
+ urllib3
+
+ locustfile.py: |
+ import urllib3
+ from locust import HttpUser, task, between
+
+ # disable insecure‐ssl warnings
+ urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
+ class HECUser(HttpUser):
+ wait_time = between(1, 2)
+ # use HTTPS and explicit port
+ host = "https://splunk-ingestor-ingestor-service:8088"
+
+ def on_start(self):
+ # turn off SSL cert verification
+ self.client.verify = False
+
+ @task
+ def send_event(self):
+ token = "HEC_TOKEN"
+ headers = {
+ "Authorization": f"Splunk {token}",
+ "Content-Type": "application/json"
+ }
+ payload = {"event": {"message": "load test", "value": 123}}
+ # this will POST to https://…:8088/services/collector/event
+ self.client.post(
+ "/services/collector/event",
+ json=payload,
+ headers=headers,
+ name="HEC POST"
+ )
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: hec-locust-load
+spec:
+ schedule: "*/2 * * * *"
+ concurrencyPolicy: Replace
+ startingDeadlineSeconds: 60
+ jobTemplate:
+ spec:
+ backoffLimit: 1
+ template:
+ spec:
+ containers:
+ - name: locust
+ image: python:3.9-slim
+ command:
+ - sh
+ - -c
+ - |
+ pip install --no-cache-dir -r /app/requirements.txt \
+ && exec locust \
+ -f /app/locustfile.py \
+ --headless \
+ -u 200 \
+ -r 50 \
+ --run-time 1m50s
+ volumeMounts:
+ - name: app
+ mountPath: /app
+ restartPolicy: OnFailure
+ volumes:
+ - name: app
+ configMap:
+ name: hec-locust-config
+ defaultMode: 0755
+```
+
+```
+kubectl apply -f loadgen.yaml
+```
+
+```
+$ kubectl get cm
+NAME DATA AGE
+hec-locust-config 2 10s
+kube-root-ca.crt 1 5d2h
+splunk-cluster-manager-cm-configmap 1 28m
+splunk-default-probe-configmap 3 58m
+splunk-indexer-indexer-configmap 1 28m
+splunk-ingestor-ingestor-configmap 1 48m
+```
+
+```
+$ kubectl get cj
+NAME SCHEDULE TIMEZONE SUSPEND ACTIVE LAST SCHEDULE AGE
+hec-locust-load */2 * * * * False 1 2s 26s
+```
+
+```
+$ kubectl get po
+NAME READY STATUS RESTARTS AGE
+hec-locust-load-29270114-zq7zz 1/1 Running 0 15s
+splunk-cm-cluster-manager-0 1/1 Running 0 29m
+splunk-indexer-indexer-0 1/1 Running 0 26m
+splunk-indexer-indexer-1 1/1 Running 0 26m
+splunk-indexer-indexer-2 1/1 Running 0 26m
+splunk-ingestor-ingestor-0 1/1 Running 0 41m
+splunk-ingestor-ingestor-1 1/1 Running 0 43m
+splunk-ingestor-ingestor-2 1/1 Running 0 45m
+```
+
+```
+$ aws s3 ls s3://ingestion/smartbus-test/
+ PRE 29DDC1B4-D43E-47D1-AC04-C87AC7298201/
+ PRE 43E16731-7146-4397-8553-D68B5C2C8634/
+ PRE C8A4D060-DE0D-4DCB-9690-01D8902825DC/
+```
\ No newline at end of file
diff --git a/docs/ValidationWebhook.md b/docs/ValidationWebhook.md
new file mode 100644
index 000000000..6ccb6164b
--- /dev/null
+++ b/docs/ValidationWebhook.md
@@ -0,0 +1,280 @@
+# Validation Webhook
+
+The Splunk Operator includes an optional validation webhook that validates Splunk Enterprise Custom Resource (CR) specifications before they are persisted to the Kubernetes API server. This provides immediate feedback when invalid configurations are submitted.
+
+## Overview
+
+The validation webhook intercepts CREATE and UPDATE operations on Splunk Enterprise CRDs and validates the spec fields according to predefined rules. If validation fails, the request is rejected with a descriptive error message.
+
+### Supported CRDs
+
+The webhook validates the following Custom Resource Definitions:
+
+- Standalone
+- IndexerCluster
+- SearchHeadCluster
+- ClusterManager
+- LicenseManager
+- MonitoringConsole
+
+## Enabling the Validation Webhook
+
+The validation webhook is **disabled by default** and must be explicitly enabled. This is an opt-in feature for the v4 API.
+
+### Prerequisites
+
+Before enabling the webhook, ensure you have:
+
+1. **cert-manager** installed in your cluster (required for TLS certificate management)
+
+```bash
+kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.0/cert-manager.yaml
+kubectl wait --for=condition=Available --timeout=300s deployment/cert-manager -n cert-manager
+kubectl wait --for=condition=Available --timeout=300s deployment/cert-manager-webhook -n cert-manager
+```
+
+### Deployment Options
+
+#### Option 1: Use the Webhook-Enabled Kustomize Overlay
+
+Deploy using the `config/default-with-webhook` overlay which includes all necessary webhook components:
+
+```bash
+# Build and apply the webhook-enabled configuration
+kustomize build config/default-with-webhook | kubectl apply -f -
+```
+
+#### Option 2: Enable Webhook on Existing Deployment
+
+If you already have the operator deployed, you can enable the webhook by setting the `ENABLE_VALIDATION_WEBHOOK` environment variable:
+
+```bash
+kubectl set env deployment/splunk-operator-controller-manager \
+ ENABLE_VALIDATION_WEBHOOK=true -n splunk-operator
+```
+
+**Note:** This option also requires the webhook service, ValidatingWebhookConfiguration, and TLS certificates to be deployed. Use Option 1 for a complete deployment.
+
+#### Option 3: Modify Default Kustomization
+
+Edit `config/default/kustomization.yaml` to uncomment the webhook-related sections:
+
+1. Uncomment `- ../webhook` in the `bases` section
+2. Uncomment `- ../certmanager` in the `bases` section
+3. Uncomment `- manager_webhook_patch.yaml` in `patchesStrategicMerge`
+4. Uncomment `- webhookcainjection_patch.yaml` in `patchesStrategicMerge`
+5. Uncomment the `vars` section for certificate injection
+
+Then deploy:
+
+```bash
+make deploy IMG= SPLUNK_GENERAL_TERMS="--accept-sgt-current-at-splunk-com"
+```
+
+## Validated Fields
+
+The webhook validates the following spec fields:
+
+### Common Fields (All CRDs)
+
+| Field | Validation Rule | Error Message |
+|-------|-----------------|---------------|
+| `spec.etcVolumeStorageConfig.storageCapacity` | Must match format `^[0-9]+Gi$` (e.g., "10Gi", "100Gi") | must be in Gi format (e.g., '10Gi', '100Gi') |
+| `spec.varVolumeStorageConfig.storageCapacity` | Must match format `^[0-9]+Gi$` | must be in Gi format (e.g., '10Gi', '100Gi') |
+| `spec.etcVolumeStorageConfig.storageClassName` | Required when `ephemeralStorage=false` and `storageCapacity` is set | storageClassName is required when using persistent storage |
+| `spec.varVolumeStorageConfig.storageClassName` | Required when `ephemeralStorage=false` and `storageCapacity` is set | storageClassName is required when using persistent storage |
+
+### CRD-Specific Fields
+
+| CRD | Field | Validation Rule |
+|-----|-------|-----------------|
+| Standalone | `spec.replicas` | Must be ≥ 0 |
+| IndexerCluster | `spec.replicas` | Must be ≥ 3 |
+| SearchHeadCluster | `spec.replicas` | Must be ≥ 3 |
+
+### SmartStore Validation (Standalone, ClusterManager)
+
+SmartStore configuration is validated only when provided:
+
+| Field | Validation Rule |
+|-------|-----------------|
+| `spec.smartstore.volumes[*].name` | Required (non-empty) |
+| `spec.smartstore.volumes[*]` | Either `endpoint` or `path` must be specified |
+| `spec.smartstore.indexes[*].name` | Required (non-empty) |
+| `spec.smartstore.indexes[*].volumeName` | Required (non-empty) |
+
+### AppFramework Validation (Standalone, ClusterManager, SearchHeadCluster)
+
+AppFramework configuration is validated only when provided:
+
+| Field | Validation Rule |
+|-------|-----------------|
+| `spec.appRepo.appSources[*].name` | Required (non-empty) |
+| `spec.appRepo.appSources[*].location` | Required (non-empty) |
+| `spec.appRepo.volumes[*].name` | Required (non-empty) |
+
+## Example Validation Errors
+
+### Invalid Replicas
+
+```yaml
+apiVersion: enterprise.splunk.com/v4
+kind: Standalone
+metadata:
+ name: example
+spec:
+ replicas: -1 # Invalid: negative value
+```
+
+Error:
+```
+The Standalone "example" is invalid: .spec.replicas: Invalid value: -1: should be a non-negative integer
+```
+
+### Invalid Storage Configuration
+
+```yaml
+apiVersion: enterprise.splunk.com/v4
+kind: Standalone
+metadata:
+ name: example
+spec:
+ etcVolumeStorageConfig:
+ storageCapacity: "10GB" # Invalid: must use Gi suffix
+```
+
+Error:
+```
+The Standalone "example" is invalid: spec.etcVolumeStorageConfig.storageCapacity: Invalid value: "10GB": must be in Gi format (e.g., '10Gi', '100Gi')
+```
+
+### Missing SmartStore Volume Name
+
+```yaml
+apiVersion: enterprise.splunk.com/v4
+kind: Standalone
+metadata:
+ name: example
+spec:
+ smartstore:
+ volumes:
+ - name: "" # Invalid: empty name
+ endpoint: "s3://bucket"
+```
+
+Error:
+```
+The Standalone "example" is invalid: spec.smartstore.volumes[0].name: Required value: volume name is required
+```
+
+## Verifying Webhook Deployment
+
+### Check Webhook Pod is Running
+
+```bash
+kubectl get pods -n splunk-operator
+# Expected: splunk-operator-controller-manager-xxx 1/1 Running
+```
+
+### Check Certificate is Ready
+
+```bash
+kubectl get certificate -n splunk-operator
+# Expected: splunk-operator-serving-cert True webhook-server-cert
+```
+
+### Check Webhook is Registered
+
+```bash
+kubectl get validatingwebhookconfiguration splunk-operator-validating-webhook-configuration
+```
+
+### Check Operator Logs
+
+```bash
+kubectl logs -n splunk-operator deployment/splunk-operator-controller-manager | grep -i webhook
+# Look for: "Validation webhook enabled via ENABLE_VALIDATION_WEBHOOK=true"
+# Look for: "Starting webhook server" {"port": 9443}
+```
+
+## Troubleshooting
+
+### Webhook Not Being Called
+
+1. Verify the ValidatingWebhookConfiguration exists:
+ ```bash
+ kubectl get validatingwebhookconfiguration splunk-operator-validating-webhook-configuration -o yaml
+ ```
+
+2. Check that the CA bundle is injected:
+ ```bash
+ kubectl get validatingwebhookconfiguration splunk-operator-validating-webhook-configuration \
+ -o jsonpath='{.webhooks[0].clientConfig.caBundle}' | base64 -d | head -1
+ # Should show: -----BEGIN CERTIFICATE-----
+ ```
+
+3. Verify webhook service endpoints:
+ ```bash
+ kubectl get endpoints -n splunk-operator splunk-operator-webhook-service
+ # Should show an IP address
+ ```
+
+### Certificate Issues
+
+1. Check cert-manager logs:
+ ```bash
+ kubectl logs -n cert-manager deployment/cert-manager
+ ```
+
+2. Check certificate status:
+ ```bash
+ kubectl describe certificate -n splunk-operator splunk-operator-serving-cert
+ ```
+
+3. Check issuer:
+ ```bash
+ kubectl get issuer -n splunk-operator
+ ```
+
+### Webhook Disabled
+
+If you see "Validation webhook disabled" in the logs, ensure:
+
+1. The `ENABLE_VALIDATION_WEBHOOK` environment variable is set to `true`
+2. You're using the correct kustomize overlay (`config/default-with-webhook`)
+
+## Architecture
+
+The validation webhook consists of:
+
+| Component | Description |
+|-----------|-------------|
+| **Webhook Server** | HTTP server listening on port 9443 with TLS |
+| **Validator Registry** | Maps CRD types to their validation functions |
+| **ValidatingWebhookConfiguration** | Kubernetes resource that registers the webhook |
+| **Certificate** | TLS certificate managed by cert-manager |
+| **Service** | Kubernetes service exposing the webhook endpoint |
+
+### Request Flow
+
+1. User submits a CREATE/UPDATE request for a Splunk CRD
+2. Kubernetes API server intercepts the request
+3. API server sends an AdmissionReview to the webhook service
+4. Webhook server validates the spec fields
+5. Webhook returns Allowed/Denied response
+6. If allowed, the resource is persisted; if denied, user receives error
+
+## Disabling the Webhook
+
+To disable the webhook after it has been enabled:
+
+```bash
+kubectl set env deployment/splunk-operator-controller-manager \
+ ENABLE_VALIDATION_WEBHOOK=false -n splunk-operator
+```
+
+Or redeploy using the default kustomization (without webhook):
+
+```bash
+make deploy IMG= SPLUNK_GENERAL_TERMS="--accept-sgt-current-at-splunk-com"
+```
diff --git a/go.mod b/go.mod
index e1d9c42b5..3615f95ed 100644
--- a/go.mod
+++ b/go.mod
@@ -1,51 +1,53 @@
module github.com/splunk/splunk-operator
-go 1.25.5
+go 1.25.7
require (
- cloud.google.com/go/storage v1.30.1
+ cloud.google.com/go/storage v1.36.0
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1
- github.com/aws/aws-sdk-go-v2 v1.36.6
+ github.com/aws/aws-sdk-go-v2 v1.41.1
github.com/aws/aws-sdk-go-v2/config v1.29.18
github.com/aws/aws-sdk-go-v2/credentials v1.17.71
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.85
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.1
+ github.com/aws/aws-sdk-go-v2/service/sqs v1.42.21
github.com/go-logr/logr v1.4.3
github.com/google/go-cmp v0.7.0
github.com/google/uuid v1.6.0
github.com/joho/godotenv v1.5.1
github.com/minio/minio-go/v7 v7.0.16
- github.com/onsi/ginkgo/v2 v2.27.3
- github.com/onsi/gomega v1.38.3
+ github.com/onsi/ginkgo v1.16.5
+ github.com/onsi/ginkgo/v2 v2.28.1
+ github.com/onsi/gomega v1.39.1
github.com/pkg/errors v0.9.1
- github.com/prometheus/client_golang v1.19.1
- github.com/stretchr/testify v1.9.0
+ github.com/prometheus/client_golang v1.22.0
+ github.com/stretchr/testify v1.11.1
github.com/wk8/go-ordered-map/v2 v2.1.7
- go.uber.org/zap v1.26.0
- google.golang.org/api v0.126.0
- k8s.io/api v0.31.0
- k8s.io/apiextensions-apiserver v0.31.0
- k8s.io/apimachinery v0.31.0
- k8s.io/client-go v0.31.0
+ go.uber.org/zap v1.27.0
+ google.golang.org/api v0.155.0
+ k8s.io/api v0.33.0
+ k8s.io/apiextensions-apiserver v0.33.0
+ k8s.io/apimachinery v0.33.0
+ k8s.io/client-go v0.33.0
k8s.io/kubectl v0.26.2
- sigs.k8s.io/controller-runtime v0.19.0
+ sigs.k8s.io/controller-runtime v0.21.0
)
require (
- cloud.google.com/go v0.110.7 // indirect
- cloud.google.com/go/compute/metadata v0.3.0 // indirect
- cloud.google.com/go/iam v1.1.1 // indirect
+ cel.dev/expr v0.24.0 // indirect
+ cloud.google.com/go v0.112.0 // indirect
+ cloud.google.com/go/compute/metadata v0.9.0 // indirect
+ cloud.google.com/go/iam v1.1.5 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Masterminds/semver/v3 v3.4.0 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
- github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.37 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.37 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.37 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect
@@ -55,51 +57,50 @@ require (
github.com/aws/aws-sdk-go-v2/service/sso v1.25.6 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.34.1 // indirect
- github.com/aws/smithy-go v1.22.4 // indirect
+ github.com/aws/smithy-go v1.24.0 // indirect
github.com/bahlo/generic-list-go v0.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/buger/jsonparser v1.1.1 // indirect
- github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+ github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
- github.com/evanphx/json-patch/v5 v5.9.0 // indirect
+ github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
- github.com/go-openapi/jsonpointer v0.19.6 // indirect
+ github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
- github.com/go-openapi/swag v0.22.4 // indirect
+ github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
- github.com/google/cel-go v0.20.1 // indirect
- github.com/google/gnostic-models v0.6.8 // indirect
- github.com/google/gofuzz v1.2.0 // indirect
- github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
- github.com/google/s2a-go v0.1.4 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
- github.com/googleapis/gax-go/v2 v2.11.0 // indirect
- github.com/gorilla/websocket v1.5.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
- github.com/imdario/mergo v0.3.12 // indirect
+ github.com/google/btree v1.1.3 // indirect
+ github.com/google/cel-go v0.23.2 // indirect
+ github.com/google/gnostic-models v0.6.9 // indirect
+ github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 // indirect
+ github.com/google/s2a-go v0.1.7 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
+ github.com/googleapis/gax-go/v2 v2.12.0 // indirect
+ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.13.5 // indirect
+ github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/cpuid v1.3.1 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/minio/md5-simd v1.1.0 // indirect
github.com/minio/sha256-simd v0.1.1 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
- github.com/moby/spdystream v0.4.0 // indirect
+ github.com/moby/spdystream v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
@@ -107,57 +108,57 @@ require (
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.55.0 // indirect
+ github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rs/xid v1.2.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/cobra v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
- github.com/stoewer/go-strcase v1.2.0 // indirect
+ github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
- go.opentelemetry.io/otel v1.28.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
- go.opentelemetry.io/otel/metric v1.28.0 // indirect
- go.opentelemetry.io/otel/sdk v1.28.0 // indirect
- go.opentelemetry.io/otel/trace v1.28.0 // indirect
- go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+ go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
+ go.opentelemetry.io/otel v1.40.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect
+ go.opentelemetry.io/otel/metric v1.40.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.40.0 // indirect
+ go.opentelemetry.io/otel/trace v1.40.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.9.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
- golang.org/x/crypto v0.45.0 // indirect
- golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
- golang.org/x/mod v0.29.0 // indirect
- golang.org/x/net v0.47.0 // indirect
- golang.org/x/oauth2 v0.27.0 // indirect
- golang.org/x/sync v0.18.0 // indirect
- golang.org/x/sys v0.38.0 // indirect
- golang.org/x/term v0.37.0 // indirect
- golang.org/x/text v0.31.0 // indirect
- golang.org/x/time v0.6.0 // indirect
- golang.org/x/tools v0.38.0 // indirect
- golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
+ golang.org/x/crypto v0.47.0 // indirect
+ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
+ golang.org/x/mod v0.32.0 // indirect
+ golang.org/x/net v0.49.0 // indirect
+ golang.org/x/oauth2 v0.34.0 // indirect
+ golang.org/x/sync v0.19.0 // indirect
+ golang.org/x/sys v0.40.0 // indirect
+ golang.org/x/term v0.39.0 // indirect
+ golang.org/x/text v0.33.0 // indirect
+ golang.org/x/time v0.9.0 // indirect
+ golang.org/x/tools v0.41.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
- google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect
- google.golang.org/grpc v1.65.0 // indirect
- google.golang.org/protobuf v1.36.7 // indirect
+ google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect
+ google.golang.org/grpc v1.78.0 // indirect
+ google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.66.4 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/apiserver v0.31.0 // indirect
- k8s.io/component-base v0.31.0 // indirect
+ k8s.io/apiserver v0.33.0 // indirect
+ k8s.io/component-base v0.33.0 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
- k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
- k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
- sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect
- sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+ k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
+ k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
+ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
+ sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
+ sigs.k8s.io/randfill v1.0.0 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
diff --git a/go.sum b/go.sum
index b0a4c1cd2..f4c6dae6b 100644
--- a/go.sum
+++ b/go.sum
@@ -1,13 +1,14 @@
+cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
+cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o=
-cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
-cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
-cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
-cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y=
-cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
-cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM=
-cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E=
+cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
+cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
+cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
+cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
+cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI=
+cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8=
+cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8=
+cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
@@ -23,15 +24,12 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/aws/aws-sdk-go-v2 v1.36.6 h1:zJqGjVbRdTPojeCGWn5IR5pbJwSQSBh5RWFTQcEQGdU=
-github.com/aws/aws-sdk-go-v2 v1.36.6/go.mod h1:EYrzvCCN9CMUTa5+6lf6MM4tq3Zjp8UhSGR/cBsjai0=
+github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
+github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 h1:12SpdwU8Djs+YGklkinSSlcrPyj3H4VifVsKf78KbwA=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11/go.mod h1:dd+Lkp6YmMryke+qxW/VnKyhMBDTYP41Q2Bb+6gNZgY=
github.com/aws/aws-sdk-go-v2/config v1.29.18 h1:x4T1GRPnqKV8HMJOMtNktbpQMl3bIsfx8KbqmveUO2I=
@@ -42,10 +40,10 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33 h1:D9ixiWSG4lyUBL2DDNK924
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33/go.mod h1:caS/m4DI+cij2paz3rtProRBI4s/+TCiWoaWZuQ9010=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.85 h1:AfpstoiaenxGSCUheWiicgZE5XXS5Fi4CcQ4PA/x+Qw=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.85/go.mod h1:HxiF0Fd6WHWjdjOffLkCauq7JqzWqMMq0iUVLS7cPQc=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.37 h1:osMWfm/sC/L4tvEdQ65Gri5ZZDCUpuYJZbTTDrsn4I0=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.37/go.mod h1:ZV2/1fbjOPr4G4v38G3Ww5TBT4+hmsK45s/rxu1fGy0=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.37 h1:v+X21AvTb2wZ+ycg1gx+orkB/9U6L7AOp93R7qYxsxM=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.37/go.mod h1:G0uM1kyssELxmJ2VZEfG0q2npObR3BAkF3c1VsfVnfs=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.37 h1:XTZZ0I3SZUHAtBLBU6395ad+VOblE0DwQP6MuaNeics=
@@ -60,14 +58,16 @@ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.18 h1:OS2e0SKqsU2Li
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.18/go.mod h1:+Yrk+MDGzlNGxCXieljNeWpoZTCQUQVL+Jk9hGGJ8qM=
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.1 h1:RkHXU9jP0DptGy7qKI8CBGsUJruWz0v5IgwBa2DwWcU=
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.1/go.mod h1:3xAOf7tdKF+qbb+XpU+EPhNXAdun3Lu1RcDrj8KC24I=
+github.com/aws/aws-sdk-go-v2/service/sqs v1.42.21 h1:Oa0IhwDLVrcBHDlNo1aosG4CxO4HyvzDV5xUWqWcBc0=
+github.com/aws/aws-sdk-go-v2/service/sqs v1.42.21/go.mod h1:t98Ssq+qtXKXl2SFtaSkuT6X42FSM//fnO6sfq5RqGM=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.6 h1:rGtWqkQbPk7Bkwuv3NzpE/scwwL9sC1Ul3tn9x83DUI=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.6/go.mod h1:u4ku9OLv4TO4bCPdxf4fA1upaMaJmP9ZijGk3AAOC6Q=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.4 h1:OV/pxyXh+eMA0TExHEC4jyWdumLxNbzz1P0zJoezkJc=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.4/go.mod h1:8Mm5VGYwtm+r305FfPSuc+aFkrypeylGYhFim6XEPoc=
github.com/aws/aws-sdk-go-v2/service/sts v1.34.1 h1:aUrLQwJfZtwv3/ZNG2xRtEen+NqI3iesuacjP51Mv1s=
github.com/aws/aws-sdk-go-v2/service/sts v1.34.1/go.mod h1:3wFBZKoWnX3r+Sm7in79i54fBmNfwhdNdQuscCw7QIk=
-github.com/aws/smithy-go v1.22.4 h1:uqXzVZNuNexwc/xrh6Tb56u89WDlJY6HS+KC0S4QSjw=
-github.com/aws/smithy-go v1.22.4/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
+github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
+github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -76,19 +76,15 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
-github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
-github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0=
+github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -103,20 +99,24 @@ github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM=
+github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo=
+github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
+github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
-github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
+github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs=
github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo=
github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M=
@@ -130,13 +130,15 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
-github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
-github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
@@ -151,9 +153,7 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -162,21 +162,20 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
-github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
-github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4=
+github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo=
+github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
+github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
@@ -185,26 +184,24 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
-github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
-github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
-github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
-github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
+github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 h1:z2ogiKUYzX5Is6zr/vP9vJGqPwcdqsWjOt+V8J7+bTc=
+github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
+github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
+github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
-github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
-github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4=
-github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
+github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
-github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
@@ -219,8 +216,9 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
@@ -248,8 +246,8 @@ github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKU
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8=
-github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
+github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
+github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -261,29 +259,39 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8=
-github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
-github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM=
-github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI=
+github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28=
+github.com/onsi/gomega v1.39.1/go.mod h1:hL6yVALoTOxeWudERyfppUcZXjMwIMLnuSfruD2lcfg=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -296,8 +304,8 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
+github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -311,8 +319,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
@@ -327,115 +335,103 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
-go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
-go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
-go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
-go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
-go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
-go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
-go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
-go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
-go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
+go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
+go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs=
+go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
+go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
+go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
+go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
+go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
+go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
+go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
+go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
+go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
+go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
-go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
-golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
+golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
+golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
-golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
-golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
+golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
+golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
-golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
+golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
+golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
-golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
+golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
+golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
-golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
+golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
-golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
+golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
-golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
+golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
+golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
-golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
-golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
-golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
+golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
+golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
+golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -444,10 +440,10 @@ golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
-golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
+golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
+golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -456,32 +452,28 @@ golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3j
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
-google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o=
-google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
+gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
+gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
+google.golang.org/api v0.155.0 h1:vBmGhCYs0djJttDNynWo44zosHlPvHmA0XiN2zP2DtA=
+google.golang.org/api v0.155.0/go.mod h1:GI5qK5f40kCpHfPn6+YzGAByIKWv8ujFnmoWm7Igduk=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
-google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
-google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
-google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ=
+google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
+google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
+google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M=
+google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
-google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
-google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
+google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
+google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -491,59 +483,61 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
-google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
+google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=
gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
-k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
-k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk=
-k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk=
-k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
-k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
-k8s.io/apiserver v0.31.0 h1:p+2dgJjy+bk+B1Csz+mc2wl5gHwvNkC9QJV+w55LVrY=
-k8s.io/apiserver v0.31.0/go.mod h1:KI9ox5Yu902iBnnyMmy7ajonhKnkeZYJhTZ/YI+WEMk=
-k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
-k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
-k8s.io/component-base v0.31.0 h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs=
-k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo=
+k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU=
+k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM=
+k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs=
+k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc=
+k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ=
+k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
+k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc=
+k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8=
+k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98=
+k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg=
+k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk=
+k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/kubectl v0.26.2 h1:SMPB4j48eVFxsYluBq3VLyqXtE6b72YnszkbTAtFye4=
k8s.io/kubectl v0.26.2/go.mod h1:KYWOXSwp2BrDn3kPeoU/uKzKtdqvhK1dgZGd0+no4cM=
-k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
-k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
-sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q=
-sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
+k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
+sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8=
+sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml
index 09e90481e..e5541e017 100644
--- a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml
+++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml
@@ -2,7 +2,7 @@
apiVersion: v1
kind: List
items:
-{{- range default (default (until 1) .Values.sva.c3.indexerClusters) .Values.sva.m4.indexerClusters }}
+{{- range default (default (list (dict "name" .Values.indexerCluster.name)) .Values.sva.c3.indexerClusters) .Values.sva.m4.indexerClusters }}
- apiVersion: enterprise.splunk.com/v4
kind: IndexerCluster
metadata:
@@ -163,5 +163,19 @@ items:
{{ toYaml . | indent 6 }}
{{- end }}
{{- end }}
+ {{- with $.Values.indexerCluster.queueRef }}
+ queueRef:
+ name: {{ .name }}
+ {{- if .namespace }}
+ namespace: {{ .namespace }}
+ {{- end }}
+ {{- end }}
+ {{- with $.Values.indexerCluster.objectStorageRef }}
+ objectStorageRef:
+ name: {{ .name }}
+ {{- if .namespace }}
+ namespace: {{ .namespace }}
+ {{- end }}
+ {{- end }}
{{- end }}
{{- end }}
\ No newline at end of file
diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml
new file mode 100644
index 000000000..e5ab1258c
--- /dev/null
+++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml
@@ -0,0 +1,137 @@
+{{- if .Values.ingestorCluster.enabled }}
+apiVersion: enterprise.splunk.com/v4
+kind: IngestorCluster
+metadata:
+ name: {{ .Values.ingestorCluster.name }}
+ namespace: {{ default .Release.Namespace .Values.ingestorCluster.namespaceOverride }}
+ {{- with .Values.ingestorCluster.additionalLabels }}
+ labels:
+ {{ toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.ingestorCluster.additionalAnnotations }}
+ annotations:
+ {{ toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ replicas: {{ default 3 .Values.ingestorCluster.replicaCount }}
+ {{- if .Values.image.repository }}
+ image: {{ .Values.image.repository }}
+ {{- end }}
+ {{- if .Values.image.imagePullPolicy }}
+ imagePullPolicy: {{ .Values.image.imagePullPolicy }}
+ {{- end }}
+ {{- with .Values.image.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- if .Values.ingestorCluster.serviceAccount }}
+ serviceAccount: {{ .Values.ingestorCluster.serviceAccount }}
+ {{- end }}
+ {{- if .Values.existingLicenseManager.name }}
+ licenseManagerRef:
+ name: {{ .Values.existingLicenseManager.name }}
+ {{- if .Values.existingLicenseManager.namespace }}
+ namespace: {{ .Values.existingLicenseManager.namespace }}
+ {{- end }}
+ {{- else if and .Values.licenseManager.enabled .Values.licenseManager.name }}
+ licenseManagerRef:
+ name: {{ .Values.licenseManager.name }}
+ {{- if .Values.licenseManager.namespaceOverride }}
+ namespace: {{ .Values.licenseManager.namespaceOverride }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.existingMonitoringConsole.name }}
+ monitoringConsoleRef:
+ name: {{ .Values.existingMonitoringConsole.name }}
+ {{- if .Values.existingMonitoringConsole.namespace }}
+ namespace: {{ .Values.existingMonitoringConsole.namespace }}
+ {{- end }}
+ {{- else if and .Values.monitoringConsole.enabled .Values.monitoringConsole.name }}
+ monitoringConsoleRef:
+ name: {{ .Values.monitoringConsole.name }}
+ {{- if .Values.monitoringConsole.namespaceOverride }}
+ namespace: {{ .Values.monitoringConsole.namespaceOverride }}
+ {{- end }}
+ {{- end }}
+ livenessInitialDelaySeconds: {{ default 300 .Values.ingestorCluster.livenessInitialDelaySeconds }}
+ readinessInitialDelaySeconds: {{ default 10 .Values.ingestorCluster.readinessInitialDelaySeconds }}
+ {{- with .Values.ingestorCluster.startupProbe }}
+ startupProbe:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.ingestorCluster.livenessProbe }}
+ livenessProbe:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.ingestorCluster.readinessProbe }}
+ readinessProbe:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.ingestorCluster.etcVolumeStorageConfig }}
+ etcVolumeStorageConfig:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.ingestorCluster.varVolumeStorageConfig }}
+ varVolumeStorageConfig:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.ingestorCluster.resources }}
+ resources:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.ingestorCluster.serviceTemplate }}
+ serviceTemplate:
+{{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.ingestorCluster.tolerations }}
+ tolerations:
+{{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.ingestorCluster.affinity }}
+ affinity:
+{{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.ingestorCluster.topologySpreadConstraints }}
+ topologySpreadConstraints:
+{{- toYaml . | nindent 4 }}
+ {{- end }}
+  {{- with $.Values.ingestorCluster.queueRef }}
+  queueRef:
+    name: {{ .name }}
+    {{- if .namespace }}
+    namespace: {{ .namespace }}
+    {{- end }}
+  {{- end }}
+  {{- with $.Values.ingestorCluster.objectStorageRef }}
+  objectStorageRef:
+    name: {{ .name }}
+    {{- if .namespace }}
+    namespace: {{ .namespace }}
+    {{- end }}
+  {{- end }}
+ {{- with .Values.ingestorCluster.extraEnv }}
+ extraEnv:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.ingestorCluster.appRepo }}
+ appRepo:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.ingestorCluster.volumes }}
+ volumes:
+{{ toYaml . | indent 4 }}
+ {{- end }}
+ {{- if .Values.ingestorCluster.licenseUrl }}
+ licenseUrl: {{ .Values.ingestorCluster.licenseUrl }}
+ {{- end }}
+ {{- if .Values.ingestorCluster.defaultsUrl }}
+ defaultsUrl: {{ .Values.ingestorCluster.defaultsUrl }}
+ {{- end }}
+ {{- if .Values.ingestorCluster.defaults }}
+ defaults: |-
+ {{ toYaml .Values.ingestorCluster.defaults | indent 4 }}
+ {{- end }}
+ {{- if .Values.ingestorCluster.defaultsUrlApps }}
+ defaultsUrlApps: {{ .Values.ingestorCluster.defaultsUrlApps }}
+ {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml
new file mode 100644
index 000000000..033aed904
--- /dev/null
+++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml
@@ -0,0 +1,28 @@
+{{- if .Values.objectStorage }}
+{{- if .Values.objectStorage.enabled }}
+apiVersion: enterprise.splunk.com/v4
+kind: ObjectStorage
+metadata:
+ name: {{ .Values.objectStorage.name }}
+ namespace: {{ default .Release.Namespace .Values.objectStorage.namespaceOverride }}
+ {{- with .Values.objectStorage.additionalLabels }}
+ labels:
+{{ toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.objectStorage.additionalAnnotations }}
+ annotations:
+{{ toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ provider: {{ .Values.objectStorage.provider | quote }}
+ {{- with .Values.objectStorage.s3 }}
+ s3:
+ {{- if .endpoint }}
+ endpoint: {{ .endpoint | quote }}
+ {{- end }}
+ {{- if .path }}
+ path: {{ .path | quote }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml
new file mode 100644
index 000000000..06a3c5dbd
--- /dev/null
+++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml
@@ -0,0 +1,38 @@
+{{- if .Values.queue }}
+{{- if .Values.queue.enabled }}
+apiVersion: enterprise.splunk.com/v4
+kind: Queue
+metadata:
+ name: {{ .Values.queue.name }}
+ namespace: {{ default .Release.Namespace .Values.queue.namespaceOverride }}
+ {{- with .Values.queue.additionalLabels }}
+ labels:
+{{ toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.queue.additionalAnnotations }}
+ annotations:
+{{ toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ provider: {{ .Values.queue.provider | quote }}
+ {{- with .Values.queue.sqs }}
+ sqs:
+ {{- if .endpoint }}
+ endpoint: {{ .endpoint | quote }}
+ {{- end }}
+ {{- if .dlq }}
+ dlq: {{ .dlq | quote }}
+ {{- end }}
+ {{- if .name }}
+ name: {{ .name | quote }}
+ {{- end }}
+ {{- if .authRegion }}
+ authRegion: {{ .authRegion | quote }}
+ {{- end }}
+    {{- if .volumes }}
+    volumes:
+      {{ toYaml .volumes | indent 4 }}
+    {{- end }}
+ {{- end }}
+{{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_searchheadcluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_searchheadcluster.yaml
index a2114fc27..fb238f692 100644
--- a/helm-chart/splunk-enterprise/templates/enterprise_v4_searchheadcluster.yaml
+++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_searchheadcluster.yaml
@@ -26,6 +26,14 @@ items:
{{- end }}
{{- with $.Values.searchHeadCluster.appRepo }}
appRepo:
+{{ toYaml . | indent 6 }}
+ {{- end }}
+ {{- with $.Values.searchHeadCluster.deployerNodeAffinity }}
+ deployerNodeAffinity:
+{{ toYaml . | indent 6 }}
+ {{- end }}
+ {{- with $.Values.searchHeadCluster.deployerResourceSpec }}
+ deployerResourceSpec:
{{ toYaml . | indent 6 }}
{{- end }}
{{- if $.Values.existingClusterManager }}
diff --git a/helm-chart/splunk-enterprise/values.yaml b/helm-chart/splunk-enterprise/values.yaml
index da6308b1f..204f57878 100644
--- a/helm-chart/splunk-enterprise/values.yaml
+++ b/helm-chart/splunk-enterprise/values.yaml
@@ -350,6 +350,10 @@ indexerCluster:
# nodeAffinityPolicy: [Honor|Ignore] # optional; beta since v1.26
# nodeTaintsPolicy: [Honor|Ignore] # optional; beta since v1.26
+ queueRef: {}
+
+ objectStorageRef: {}
+
searchHeadCluster:
enabled: false
@@ -396,6 +400,10 @@ searchHeadCluster:
defaultsUrlApps: ""
+ deployerNodeAffinity: {}
+
+ deployerResourceSpec: {}
+
extraEnv: []
# - name:
# value:
@@ -808,3 +816,95 @@ extraManifests: []
# spec:
# securityPolicy:
# name: "gcp-cloud-armor-policy-test"
+
+ingestorCluster:
+
+ enabled: false
+
+ name: "ingestor"
+
+ namespaceOverride: ""
+
+ additionalLabels: {}
+
+ additionalAnnotations: {}
+
+ replicaCount: 3
+
+ appRepo: {}
+ # appsRepoPollIntervalSeconds:
+ # defaults:
+ # volumeName:
+ # scope:
+ # appSources:
+ # - name:
+ # location:
+ # volumes:
+ # - name:
+ # storageType:
+ # provider:
+ # path:
+ # endpoint:
+ # region:
+ # secretRef:
+
+ volumes: []
+
+ extraEnv: []
+ # - name:
+ # value:
+
+ livenessInitialDelaySeconds: 300
+
+ readinessInitialDelaySeconds: 10
+
+ # Set Probes for Splunk instance pod containers
+ # reference: https://github.com/splunk/splunk-operator/blob/main/docs/HealthCheck.md
+ startupProbe: {}
+ # initialDelaySeconds: 40
+ # timeoutSeconds: 30
+ # periodSeconds: 30
+ # failureThreshold: 12
+ livenessProbe: {}
+ # initialDelaySeconds: 30
+ # timeoutSeconds: 30
+ # periodSeconds: 30
+ # failureThreshold: 3
+ readinessProbe: {}
+ # initialDelaySeconds: 10
+ # timeoutSeconds: 5
+ # periodSeconds: 5
+ # failureThreshold: 3
+
+ etcVolumeStorageConfig:
+ ephemeralStorage: false
+ storageCapacity: 10Gi
+ # storageClassName: gp2
+
+ varVolumeStorageConfig:
+ ephemeralStorage: false
+ storageCapacity: 100Gi
+ # storageClassName: gp2
+
+ resources: {}
+ # requests:
+ # memory: "2Gi"
+ # cpu: "4"
+ # limits:
+ # memory: "12Gi"
+ # cpu: "24"
+
+ serviceAccount: ""
+
+ # ServiceTemplate is a template used to create Kubernetes services
+ serviceTemplate: {}
+
+ topologySpreadConstraints: []
+
+ tolerations: []
+
+ affinity: {}
+
+ queueRef: {}
+
+ objectStorageRef: {}
\ No newline at end of file
diff --git a/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml b/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml
index 2b5d51ec9..a952b174c 100644
--- a/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml
+++ b/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml
@@ -222,6 +222,32 @@ rules:
- get
- patch
- update
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters/status
+ verbs:
+ - get
+ - patch
+ - update
- apiGroups:
- enterprise.splunk.com
resources:
@@ -300,6 +326,58 @@ rules:
- get
- patch
- update
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues/status
+ verbs:
+ - get
+ - patch
+ - update
- apiGroups:
- enterprise.splunk.com
resources:
diff --git a/helm-chart/splunk-operator/templates/rbac/ingestorcluster_editor_role.yaml b/helm-chart/splunk-operator/templates/rbac/ingestorcluster_editor_role.yaml
new file mode 100644
index 000000000..b161aea9c
--- /dev/null
+++ b/helm-chart/splunk-operator/templates/rbac/ingestorcluster_editor_role.yaml
@@ -0,0 +1,55 @@
+# This rule is not used by the project splunk-operator itself.
+# It is provided to allow the cluster admin to help manage permissions for users.
+#
+# Grants permissions to create, update, and delete resources within the enterprise.splunk.com.
+# This role is intended for users who need to manage these resources
+# but should not control RBAC or manage permissions for others.
+{{- if .Values.splunkOperator.clusterWideAccess }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "splunk-operator.operator.fullname" . }}-ingestorcluster-editor-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters/status
+ verbs:
+ - get
+{{- else }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ include "splunk-operator.operator.fullname" . }}-ingestorcluster-editor-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters/status
+ verbs:
+ - get
+{{- end }}
\ No newline at end of file
diff --git a/helm-chart/splunk-operator/templates/rbac/ingestorcluster_viewer_role.yaml b/helm-chart/splunk-operator/templates/rbac/ingestorcluster_viewer_role.yaml
new file mode 100644
index 000000000..47287423f
--- /dev/null
+++ b/helm-chart/splunk-operator/templates/rbac/ingestorcluster_viewer_role.yaml
@@ -0,0 +1,47 @@
+# This rule is not used by the project splunk-operator itself.
+# It is provided to allow the cluster admin to help manage permissions for users.
+#
+# Grants read-only access to enterprise.splunk.com resources.
+# This role is intended for users who need visibility into these resources
+# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing.
+{{- if .Values.splunkOperator.clusterWideAccess }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "splunk-operator.operator.fullname" . }}-ingestorcluster-viewer-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters/status
+ verbs:
+ - get
+{{- else }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ include "splunk-operator.operator.fullname" . }}-ingestorcluster-viewer-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters/status
+ verbs:
+ - get
+{{- end }}
\ No newline at end of file
diff --git a/helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml b/helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml
new file mode 100644
index 000000000..d90f7673b
--- /dev/null
+++ b/helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml
@@ -0,0 +1,55 @@
+# This rule is not used by the project splunk-operator itself.
+# It is provided to allow the cluster admin to help manage permissions for users.
+#
+# Grants permissions to create, update, and delete resources within the enterprise.splunk.com.
+# This role is intended for users who need to manage these resources
+# but should not control RBAC or manage permissions for others.
+{{- if .Values.splunkOperator.clusterWideAccess }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "splunk-operator.operator.fullname" . }}-objectstorage-editor-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages/status
+ verbs:
+ - get
+{{- else }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ include "splunk-operator.operator.fullname" . }}-objectstorage-editor-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages/status
+ verbs:
+ - get
+{{- end }}
\ No newline at end of file
diff --git a/helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml b/helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml
new file mode 100644
index 000000000..ec9358b8d
--- /dev/null
+++ b/helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml
@@ -0,0 +1,47 @@
+# This rule is not used by the project splunk-operator itself.
+# It is provided to allow the cluster admin to help manage permissions for users.
+#
+# Grants read-only access to enterprise.splunk.com resources.
+# This role is intended for users who need visibility into these resources
+# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing.
+{{- if .Values.splunkOperator.clusterWideAccess }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "splunk-operator.operator.fullname" . }}-objectstorage-viewer-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages/status
+ verbs:
+ - get
+{{- else }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ include "splunk-operator.operator.fullname" . }}-objectstorage-viewer-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages/status
+ verbs:
+ - get
+{{- end }}
\ No newline at end of file
diff --git a/helm-chart/splunk-operator/templates/rbac/queue_editor_role.yaml b/helm-chart/splunk-operator/templates/rbac/queue_editor_role.yaml
new file mode 100644
index 000000000..6c04be75b
--- /dev/null
+++ b/helm-chart/splunk-operator/templates/rbac/queue_editor_role.yaml
@@ -0,0 +1,55 @@
+# This rule is not used by the project splunk-operator itself.
+# It is provided to allow the cluster admin to help manage permissions for users.
+#
+# Grants permissions to create, update, and delete resources within the enterprise.splunk.com.
+# This role is intended for users who need to manage these resources
+# but should not control RBAC or manage permissions for others.
+{{- if .Values.splunkOperator.clusterWideAccess }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "splunk-operator.operator.fullname" . }}-queue-editor-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues/status
+ verbs:
+ - get
+{{- else }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ include "splunk-operator.operator.fullname" . }}-queue-editor-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues/status
+ verbs:
+ - get
+{{- end }}
\ No newline at end of file
diff --git a/helm-chart/splunk-operator/templates/rbac/queue_viewer_role.yaml b/helm-chart/splunk-operator/templates/rbac/queue_viewer_role.yaml
new file mode 100644
index 000000000..2c81b98fd
--- /dev/null
+++ b/helm-chart/splunk-operator/templates/rbac/queue_viewer_role.yaml
@@ -0,0 +1,47 @@
+# This rule is not used by the project splunk-operator itself.
+# It is provided to allow the cluster admin to help manage permissions for users.
+#
+# Grants read-only access to enterprise.splunk.com resources.
+# This role is intended for users who need visibility into these resources
+# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing.
+{{- if .Values.splunkOperator.clusterWideAccess }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "splunk-operator.operator.fullname" . }}-queue-viewer-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues/status
+ verbs:
+ - get
+{{- else }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ include "splunk-operator.operator.fullname" . }}-queue-viewer-role
+rules:
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues/status
+ verbs:
+ - get
+{{- end }}
\ No newline at end of file
diff --git a/helm-chart/splunk-operator/templates/rbac/role.yaml b/helm-chart/splunk-operator/templates/rbac/role.yaml
index 2a2869654..77be54727 100644
--- a/helm-chart/splunk-operator/templates/rbac/role.yaml
+++ b/helm-chart/splunk-operator/templates/rbac/role.yaml
@@ -222,6 +222,84 @@ rules:
- get
- patch
- update
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - ingestorclusters/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - queues/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - enterprise.splunk.com
+ resources:
+ - objectstorages/status
+ verbs:
+ - get
+ - patch
+ - update
- apiGroups:
- enterprise.splunk.com
resources:
diff --git a/internal/controller/clustermanager_controller.go b/internal/controller/clustermanager_controller.go
index f149bf129..2a844bd0a 100644
--- a/internal/controller/clustermanager_controller.go
+++ b/internal/controller/clustermanager_controller.go
@@ -22,14 +22,17 @@ import (
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
"github.com/splunk/splunk-operator/internal/controller/common"
+ splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
"github.com/pkg/errors"
metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics"
enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise"
+ splutil "github.com/splunk/splunk-operator/pkg/splunk/util"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -42,7 +45,8 @@ import (
// ClusterManagerReconciler reconciles a ClusterManager object
type ClusterManagerReconciler struct {
client.Client
- Scheme *runtime.Scheme
+ Scheme *runtime.Scheme
+ Recorder record.EventRecorder
}
//+kubebuilder:rbac:groups=enterprise.splunk.com,resources=clustermanagers,verbs=get;list;watch;create;update;patch;delete
@@ -103,7 +107,10 @@ func (r *ClusterManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque
reqLogger.Info("start", "CR version", instance.GetResourceVersion())
- result, err := ApplyClusterManager(ctx, r.Client, instance)
+ // Pass event recorder through context
+ ctx = context.WithValue(ctx, splcommon.EventRecorderKey, r.Recorder)
+
+ result, err := ApplyClusterManager(ctx, r.Client, instance, nil)
if result.Requeue && result.RequeueAfter != 0 {
reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second))
}
@@ -112,8 +119,8 @@ func (r *ClusterManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
// ApplyClusterManager adding to handle unit test case
-var ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) {
- return enterprise.ApplyClusterManager(ctx, client, instance)
+var ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) (reconcile.Result, error) {
+ return enterprise.ApplyClusterManager(ctx, client, instance, podExecClient)
}
func (r *ClusterManagerReconciler) SetupWithManager(mgr ctrl.Manager) error {
@@ -156,6 +163,7 @@ func (r *ClusterManagerReconciler) SetupWithManager(mgr ctrl.Manager) error {
WithOptions(controller.Options{
MaxConcurrentReconciles: enterpriseApi.TotalWorker,
}).
+ Named("cluster-manager-controller").
Complete(r)
}
diff --git a/internal/controller/clustermanager_controller_test.go b/internal/controller/clustermanager_controller_test.go
index 99cd1289a..771d2f4f6 100644
--- a/internal/controller/clustermanager_controller_test.go
+++ b/internal/controller/clustermanager_controller_test.go
@@ -3,9 +3,11 @@ package controller
import (
"context"
"fmt"
+
"github.com/splunk/splunk-operator/internal/controller/testutils"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ splutil "github.com/splunk/splunk-operator/pkg/splunk/util"
"time"
@@ -35,7 +37,7 @@ var _ = Describe("ClusterManager Controller", func() {
It("Get ClusterManager custom resource should failed", func() {
namespace := "ns-splunk-cm-1"
- ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) {
+ ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) (reconcile.Result, error) {
return reconcile.Result{}, nil
}
nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
@@ -51,7 +53,7 @@ var _ = Describe("ClusterManager Controller", func() {
It("Create ClusterManager custom resource with annotations should pause", func() {
namespace := "ns-splunk-cm-2"
- ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) {
+ ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) (reconcile.Result, error) {
return reconcile.Result{}, nil
}
nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
@@ -71,7 +73,7 @@ var _ = Describe("ClusterManager Controller", func() {
Context("ClusterManager Management", func() {
It("Create ClusterManager custom resource should succeeded", func() {
namespace := "ns-splunk-cm-3"
- ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) {
+ ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) (reconcile.Result, error) {
return reconcile.Result{}, nil
}
nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
@@ -84,7 +86,7 @@ var _ = Describe("ClusterManager Controller", func() {
It("Cover Unused methods", func() {
namespace := "ns-splunk-cm-4"
- ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) {
+ ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) (reconcile.Result, error) {
return reconcile.Result{}, nil
}
nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
diff --git a/internal/controller/clustermaster_controller.go b/internal/controller/clustermaster_controller.go
index 9f261f85b..4e2b5b94a 100644
--- a/internal/controller/clustermaster_controller.go
+++ b/internal/controller/clustermaster_controller.go
@@ -22,6 +22,7 @@ import (
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
"github.com/splunk/splunk-operator/internal/controller/common"
+ splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
"github.com/pkg/errors"
enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
@@ -31,6 +32,7 @@ import (
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -43,7 +45,8 @@ import (
// ClusterMasterReconciler reconciles a ClusterMaster object
type ClusterMasterReconciler struct {
client.Client
- Scheme *runtime.Scheme
+ Scheme *runtime.Scheme
+ Recorder record.EventRecorder
}
//+kubebuilder:rbac:groups=enterprise.splunk.com,resources=clustermasters,verbs=get;list;watch;create;update;patch;delete
@@ -104,6 +107,9 @@ func (r *ClusterMasterReconciler) Reconcile(ctx context.Context, req ctrl.Reques
reqLogger.Info("start", "CR version", instance.GetResourceVersion())
+ // Pass event recorder through context
+ ctx = context.WithValue(ctx, splcommon.EventRecorderKey, r.Recorder)
+
result, err := ApplyClusterMaster(ctx, r.Client, instance)
if result.Requeue && result.RequeueAfter != 0 {
reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second))
@@ -158,5 +164,6 @@ func (r *ClusterMasterReconciler) SetupWithManager(mgr ctrl.Manager) error {
WithOptions(controller.Options{
MaxConcurrentReconciles: enterpriseApi.TotalWorker,
}).
+ Named("cluster-master-controller").
Complete(r)
}
diff --git a/internal/controller/clustermaster_controller_test.go b/internal/controller/clustermaster_controller_test.go
index 02d86c736..5c5de2584 100644
--- a/internal/controller/clustermaster_controller_test.go
+++ b/internal/controller/clustermaster_controller_test.go
@@ -3,6 +3,7 @@ package controller
import (
"context"
"fmt"
+
"github.com/splunk/splunk-operator/internal/controller/testutils"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
diff --git a/internal/controller/indexercluster_controller.go b/internal/controller/indexercluster_controller.go
index bc9a6c9f5..ddc8b17c9 100644
--- a/internal/controller/indexercluster_controller.go
+++ b/internal/controller/indexercluster_controller.go
@@ -26,11 +26,14 @@ import (
"github.com/pkg/errors"
enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics"
+ splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -43,7 +46,8 @@ import (
// IndexerClusterReconciler reconciles a IndexerCluster object
type IndexerClusterReconciler struct {
client.Client
- Scheme *runtime.Scheme
+ Scheme *runtime.Scheme
+ Recorder record.EventRecorder
}
//+kubebuilder:rbac:groups=enterprise.splunk.com,resources=indexerclusters,verbs=get;list;watch;create;update;patch;delete
@@ -103,6 +107,9 @@ func (r *IndexerClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque
reqLogger.Info("start", "CR version", instance.GetResourceVersion())
+ // Pass event recorder through context
+ ctx = context.WithValue(ctx, splcommon.EventRecorderKey, r.Recorder)
+
result, err := ApplyIndexerCluster(ctx, r.Client, instance)
if result.Requeue && result.RequeueAfter != 0 {
reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second))
@@ -147,6 +154,57 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
mgr.GetRESTMapper(),
&enterpriseApi.IndexerCluster{},
)).
+ Watches(&corev1.Secret{},
+ handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
+ secret, ok := obj.(*corev1.Secret)
+ if !ok {
+ return nil
+ }
+
+ // NOTE(review): listing only the Secret's namespace assumes the referenced Queue (and its Secret) live in the cluster's namespace; a cross-namespace QueueRef's Secret would never trigger a reconcile — confirm intent
+ var list enterpriseApi.IndexerClusterList
+ if err := r.Client.List(ctx, &list, client.InNamespace(secret.Namespace)); err != nil {
+ return nil
+ }
+
+ var reqs []reconcile.Request
+ for _, ic := range list.Items {
+ if ic.Spec.QueueRef.Name == "" {
+ continue
+ }
+
+ queueNS := ic.Spec.QueueRef.Namespace
+ if queueNS == "" {
+ queueNS = ic.Namespace
+ }
+
+ queue := &enterpriseApi.Queue{}
+ if err := r.Client.Get(ctx, types.NamespacedName{
+ Name: ic.Spec.QueueRef.Name,
+ Namespace: queueNS,
+ }, queue); err != nil {
+ continue
+ }
+
+ if queue.Spec.Provider != "sqs" && queue.Spec.Provider != "sqs_cp" {
+ continue
+ }
+
+ for _, vol := range queue.Spec.SQS.VolList {
+ if vol.SecretRef == secret.Name {
+ reqs = append(reqs, reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Name: ic.Name,
+ Namespace: ic.Namespace,
+ },
+ })
+ break
+ }
+ }
+ }
+ return reqs
+ }),
+ ).
Watches(&corev1.Pod{},
handler.EnqueueRequestForOwner(
mgr.GetScheme(),
@@ -171,8 +229,65 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
mgr.GetRESTMapper(),
&enterpriseApi.IndexerCluster{},
)).
+ Watches(&enterpriseApi.Queue{},
+ handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
+ b, ok := obj.(*enterpriseApi.Queue)
+ if !ok {
+ return nil
+ }
+ var list enterpriseApi.IndexerClusterList
+ if err := r.Client.List(ctx, &list); err != nil {
+ return nil
+ }
+ var reqs []reconcile.Request
+ for _, ic := range list.Items {
+ ns := ic.Spec.QueueRef.Namespace
+ if ns == "" {
+ ns = ic.Namespace
+ }
+ if ic.Spec.QueueRef.Name == b.Name && ns == b.Namespace {
+ reqs = append(reqs, reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Name: ic.Name,
+ Namespace: ic.Namespace,
+ },
+ })
+ }
+ }
+ return reqs
+ }),
+ ).
+ Watches(&enterpriseApi.ObjectStorage{},
+ handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
+ os, ok := obj.(*enterpriseApi.ObjectStorage)
+ if !ok {
+ return nil
+ }
+ var list enterpriseApi.IndexerClusterList
+ if err := r.Client.List(ctx, &list); err != nil {
+ return nil
+ }
+ var reqs []reconcile.Request
+ for _, ic := range list.Items {
+ ns := ic.Spec.ObjectStorageRef.Namespace
+ if ns == "" {
+ ns = ic.Namespace
+ }
+ if ic.Spec.ObjectStorageRef.Name == os.Name && ns == os.Namespace {
+ reqs = append(reqs, reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Name: ic.Name,
+ Namespace: ic.Namespace,
+ },
+ })
+ }
+ }
+ return reqs
+ }),
+ ).
WithOptions(controller.Options{
MaxConcurrentReconciles: enterpriseApi.TotalWorker,
}).
+ Named("indexer-cluster-controller").
Complete(r)
}
diff --git a/internal/controller/indexercluster_controller_test.go b/internal/controller/indexercluster_controller_test.go
index fe221bbd5..f9473f0f8 100644
--- a/internal/controller/indexercluster_controller_test.go
+++ b/internal/controller/indexercluster_controller_test.go
@@ -3,6 +3,7 @@ package controller
import (
"context"
"fmt"
+
"github.com/splunk/splunk-operator/internal/controller/testutils"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
diff --git a/internal/controller/ingestorcluster_controller.go b/internal/controller/ingestorcluster_controller.go
new file mode 100644
index 000000000..2725d32a6
--- /dev/null
+++ b/internal/controller/ingestorcluster_controller.go
@@ -0,0 +1,264 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+ "context"
+ "time"
+
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/tools/record"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/predicate"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+ "github.com/pkg/errors"
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ "github.com/splunk/splunk-operator/internal/controller/common"
+ metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics"
+ splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
+ enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise"
+)
+
+// IngestorClusterReconciler reconciles a IngestorCluster object
+type IngestorClusterReconciler struct {
+ client.Client
+ Scheme *runtime.Scheme
+ Recorder record.EventRecorder
+}
+
+// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=ingestorclusters,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=ingestorclusters/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=ingestorclusters/finalizers,verbs=update
+
+// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues;objectstorages,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/status;objectstorages/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/finalizers;objectstorages/finalizers,verbs=update
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
+// It fetches the IngestorCluster CR, returns early when the CR is absent or
+// paused via the IngestorClusterPausedAnnotation, attaches the event recorder
+// to the context under splcommon.EventRecorderKey, and delegates the actual
+// reconciliation work to ApplyIngestorCluster.
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/reconcile
+func (r *IngestorClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+ metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "IngestorCluster")).Inc()
+ defer recordInstrumentionData(time.Now(), req, "controller", "IngestorCluster")
+
+ reqLogger := log.FromContext(ctx)
+ reqLogger = reqLogger.WithValues("ingestorcluster", req.NamespacedName)
+
+ // Fetch the IngestorCluster
+ instance := &enterpriseApi.IngestorCluster{}
+ err := r.Get(ctx, req.NamespacedName, instance)
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ // Request object not found, could have been deleted after
+ // reconcile request. Owned objects are automatically
+ // garbage collected. For additional cleanup logic use
+ // finalizers. Return and don't requeue
+ return ctrl.Result{}, nil
+ }
+ // Error reading the object - requeue the request.
+ return ctrl.Result{}, errors.Wrap(err, "could not load ingestor cluster data")
+ }
+
+ // If the reconciliation is paused, requeue
+ annotations := instance.GetAnnotations()
+ if annotations != nil {
+ if _, ok := annotations[enterpriseApi.IngestorClusterPausedAnnotation]; ok {
+ return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil
+ }
+ }
+
+ reqLogger.Info("start", "CR version", instance.GetResourceVersion())
+
+ // Pass event recorder through context
+ ctx = context.WithValue(ctx, splcommon.EventRecorderKey, r.Recorder)
+
+ result, err := ApplyIngestorCluster(ctx, r.Client, instance)
+ if result.Requeue && result.RequeueAfter != 0 {
+ reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second))
+ }
+
+ return result, err
+}
+// ApplyIngestorCluster delegates to enterprise.ApplyIngestorCluster; declared as a variable so unit tests can stub it.
+var ApplyIngestorCluster = func(ctx context.Context, client client.Client, instance *enterpriseApi.IngestorCluster) (reconcile.Result, error) {
+ return enterprise.ApplyIngestorCluster(ctx, client, instance)
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewControllerManagedBy(mgr).
+ For(&enterpriseApi.IngestorCluster{}).
+ WithEventFilter(predicate.Or(
+ common.GenerationChangedPredicate(),
+ common.AnnotationChangedPredicate(),
+ common.LabelChangedPredicate(),
+ common.SecretChangedPredicate(),
+ common.ConfigMapChangedPredicate(),
+ common.StatefulsetChangedPredicate(),
+ common.PodChangedPredicate(),
+ common.CrdChangedPredicate(),
+ )).
+ Watches(&appsv1.StatefulSet{},
+ handler.EnqueueRequestForOwner(
+ mgr.GetScheme(),
+ mgr.GetRESTMapper(),
+ &enterpriseApi.IngestorCluster{},
+ )).
+ Watches(&corev1.Secret{},
+ handler.EnqueueRequestForOwner(
+ mgr.GetScheme(),
+ mgr.GetRESTMapper(),
+ &enterpriseApi.IngestorCluster{},
+ )).
+ Watches(&corev1.Secret{},
+ handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
+ secret, ok := obj.(*corev1.Secret)
+ if !ok {
+ return nil
+ }
+
+ // NOTE(review): listing only the Secret's namespace assumes the referenced Queue (and its Secret) live in the cluster's namespace; a cross-namespace QueueRef's Secret would never trigger a reconcile — confirm intent
+ var list enterpriseApi.IngestorClusterList
+ if err := r.Client.List(ctx, &list, client.InNamespace(secret.Namespace)); err != nil {
+ return nil
+ }
+
+ var reqs []reconcile.Request
+ for _, ic := range list.Items {
+ if ic.Spec.QueueRef.Name == "" {
+ continue
+ }
+
+ queueNS := ic.Spec.QueueRef.Namespace
+ if queueNS == "" {
+ queueNS = ic.Namespace
+ }
+
+ queue := &enterpriseApi.Queue{}
+ if err := r.Client.Get(ctx, types.NamespacedName{
+ Name: ic.Spec.QueueRef.Name,
+ Namespace: queueNS,
+ }, queue); err != nil {
+ continue
+ }
+
+ if queue.Spec.Provider != "sqs" && queue.Spec.Provider != "sqs_cp" {
+ continue
+ }
+
+ for _, vol := range queue.Spec.SQS.VolList {
+ if vol.SecretRef == secret.Name {
+ reqs = append(reqs, reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Name: ic.Name,
+ Namespace: ic.Namespace,
+ },
+ })
+ break
+ }
+ }
+ }
+ return reqs
+ }),
+ ).
+ Watches(&corev1.Pod{},
+ handler.EnqueueRequestForOwner(
+ mgr.GetScheme(),
+ mgr.GetRESTMapper(),
+ &enterpriseApi.IngestorCluster{},
+ )).
+ Watches(&corev1.ConfigMap{},
+ handler.EnqueueRequestForOwner(
+ mgr.GetScheme(),
+ mgr.GetRESTMapper(),
+ &enterpriseApi.IngestorCluster{},
+ )).
+ Watches(&enterpriseApi.Queue{},
+ handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
+ queue, ok := obj.(*enterpriseApi.Queue)
+ if !ok {
+ return nil
+ }
+ var list enterpriseApi.IngestorClusterList
+ if err := r.Client.List(ctx, &list); err != nil {
+ return nil
+ }
+ var reqs []reconcile.Request
+ for _, ic := range list.Items {
+ ns := ic.Spec.QueueRef.Namespace
+ if ns == "" {
+ ns = ic.Namespace
+ }
+ if ic.Spec.QueueRef.Name == queue.Name && ns == queue.Namespace {
+ reqs = append(reqs, reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Name: ic.Name,
+ Namespace: ic.Namespace,
+ },
+ })
+ }
+ }
+ return reqs
+ }),
+ ).
+ Watches(&enterpriseApi.ObjectStorage{},
+ handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
+ os, ok := obj.(*enterpriseApi.ObjectStorage)
+ if !ok {
+ return nil
+ }
+ var list enterpriseApi.IngestorClusterList
+ if err := r.Client.List(ctx, &list); err != nil {
+ return nil
+ }
+ var reqs []reconcile.Request
+ for _, ic := range list.Items {
+ ns := ic.Spec.ObjectStorageRef.Namespace
+ if ns == "" {
+ ns = ic.Namespace
+ }
+ if ic.Spec.ObjectStorageRef.Name == os.Name && ns == os.Namespace {
+ reqs = append(reqs, reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Name: ic.Name,
+ Namespace: ic.Namespace,
+ },
+ })
+ }
+ }
+ return reqs
+ }),
+ ).
+ WithOptions(controller.Options{
+ MaxConcurrentReconciles: enterpriseApi.TotalWorker,
+ }).
+ Named("ingestor-cluster-controller").
+ Complete(r)
+}
diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go
new file mode 100644
index 000000000..6caf99cb2
--- /dev/null
+++ b/internal/controller/ingestorcluster_controller_test.go
@@ -0,0 +1,341 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ "github.com/splunk/splunk-operator/internal/controller/testutils"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/kubernetes/scheme"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+var _ = Describe("IngestorCluster Controller", func() {
+ BeforeEach(func() {
+ time.Sleep(2 * time.Second)
+ })
+
+ AfterEach(func() {
+
+ })
+
+ Context("IngestorCluster Management", func() {
+
+ It("Get IngestorCluster custom resource should fail", func() {
+ namespace := "ns-splunk-ing-1"
+ ApplyIngestorCluster = func(ctx context.Context, client client.Client, instance *enterpriseApi.IngestorCluster) (reconcile.Result, error) {
+ return reconcile.Result{}, nil
+ }
+ nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
+
+ Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed())
+
+ _, err := GetIngestorCluster("test", nsSpecs.Name)
+ Expect(err.Error()).Should(Equal("ingestorclusters.enterprise.splunk.com \"test\" not found"))
+
+ Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed())
+ })
+
+ It("Create IngestorCluster custom resource with annotations should pause", func() {
+ namespace := "ns-splunk-ing-2"
+ annotations := make(map[string]string)
+ annotations[enterpriseApi.IngestorClusterPausedAnnotation] = ""
+ ApplyIngestorCluster = func(ctx context.Context, client client.Client, instance *enterpriseApi.IngestorCluster) (reconcile.Result, error) {
+ return reconcile.Result{}, nil
+ }
+ nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
+
+ Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed())
+
+ queue := &enterpriseApi.Queue{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "queue",
+ Namespace: nsSpecs.Name,
+ },
+ Spec: enterpriseApi.QueueSpec{
+ Provider: "sqs",
+ SQS: enterpriseApi.SQSSpec{
+ Name: "smartbus-queue",
+ AuthRegion: "us-west-2",
+ DLQ: "smartbus-dlq",
+ Endpoint: "https://sqs.us-west-2.amazonaws.com",
+ },
+ },
+ }
+ os := &enterpriseApi.ObjectStorage{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "os",
+ Namespace: nsSpecs.Name,
+ },
+ Spec: enterpriseApi.ObjectStorageSpec{
+ Provider: "s3",
+ S3: enterpriseApi.S3Spec{
+ Endpoint: "https://s3.us-west-2.amazonaws.com",
+ Path: "ingestion/smartbus-test",
+ },
+ },
+ }
+ CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, os, queue)
+ icSpec, _ := GetIngestorCluster("test", nsSpecs.Name)
+ annotations = map[string]string{}
+ icSpec.Annotations = annotations
+ icSpec.Status.Phase = "Ready"
+ UpdateIngestorCluster(icSpec, enterpriseApi.PhaseReady, os, queue)
+ DeleteIngestorCluster("test", nsSpecs.Name)
+ Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed())
+ })
+
+ It("Create IngestorCluster custom resource should succeed", func() {
+ namespace := "ns-splunk-ing-3"
+ ApplyIngestorCluster = func(ctx context.Context, client client.Client, instance *enterpriseApi.IngestorCluster) (reconcile.Result, error) {
+ return reconcile.Result{}, nil
+ }
+ nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
+
+ Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed())
+
+ annotations := make(map[string]string)
+ queue := &enterpriseApi.Queue{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "queue",
+ Namespace: nsSpecs.Name,
+ },
+ Spec: enterpriseApi.QueueSpec{
+ Provider: "sqs",
+ SQS: enterpriseApi.SQSSpec{
+ Name: "smartbus-queue",
+ AuthRegion: "us-west-2",
+ DLQ: "smartbus-dlq",
+ Endpoint: "https://sqs.us-west-2.amazonaws.com",
+ },
+ },
+ }
+ os := &enterpriseApi.ObjectStorage{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "os",
+ Namespace: nsSpecs.Name,
+ },
+ Spec: enterpriseApi.ObjectStorageSpec{
+ Provider: "s3",
+ S3: enterpriseApi.S3Spec{
+ Endpoint: "https://s3.us-west-2.amazonaws.com",
+ Path: "ingestion/smartbus-test",
+ },
+ },
+ }
+ CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, os, queue)
+ DeleteIngestorCluster("test", nsSpecs.Name)
+ Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed())
+ })
+
+ It("Cover Unused methods", func() {
+ namespace := "ns-splunk-ing-4"
+ ApplyIngestorCluster = func(ctx context.Context, client client.Client, instance *enterpriseApi.IngestorCluster) (reconcile.Result, error) {
+ return reconcile.Result{}, nil
+ }
+ nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
+
+ Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed())
+
+ queue := &enterpriseApi.Queue{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "queue",
+ Namespace: nsSpecs.Name,
+ },
+ Spec: enterpriseApi.QueueSpec{
+ Provider: "sqs",
+ SQS: enterpriseApi.SQSSpec{
+ Name: "smartbus-queue",
+ AuthRegion: "us-west-2",
+ DLQ: "smartbus-dlq",
+ Endpoint: "https://sqs.us-west-2.amazonaws.com",
+ },
+ },
+ }
+ os := &enterpriseApi.ObjectStorage{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "os",
+ Namespace: nsSpecs.Name,
+ },
+ Spec: enterpriseApi.ObjectStorageSpec{
+ Provider: "s3",
+ S3: enterpriseApi.S3Spec{
+ Endpoint: "https://s3.us-west-2.amazonaws.com",
+ Path: "ingestion/smartbus-test",
+ },
+ },
+ }
+
+ ctx := context.TODO()
+ builder := fake.NewClientBuilder()
+ c := builder.Build()
+ instance := IngestorClusterReconciler{
+ Client: c,
+ Scheme: scheme.Scheme,
+ }
+ request := reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "test",
+ Namespace: namespace,
+ },
+ }
+ _, err := instance.Reconcile(ctx, request)
+ Expect(err).ToNot(HaveOccurred())
+
+ icSpec := testutils.NewIngestorCluster("test", namespace, "image", os, queue)
+ Expect(c.Create(ctx, icSpec)).Should(Succeed())
+
+ annotations := make(map[string]string)
+ annotations[enterpriseApi.IngestorClusterPausedAnnotation] = ""
+ icSpec.Annotations = annotations
+ Expect(c.Update(ctx, icSpec)).Should(Succeed())
+
+ _, err = instance.Reconcile(ctx, request)
+ Expect(err).ToNot(HaveOccurred())
+
+ annotations = map[string]string{}
+ icSpec.Annotations = annotations
+ Expect(c.Update(ctx, icSpec)).Should(Succeed())
+
+ _, err = instance.Reconcile(ctx, request)
+ Expect(err).ToNot(HaveOccurred())
+ // NOTE(review): this mutates only the local copy; Reconcile re-fetches the CR from the client, so no deletion path is actually exercised — confirm intent
+ icSpec.DeletionTimestamp = &metav1.Time{}
+ _, err = instance.Reconcile(ctx, request)
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ })
+})
+
+func GetIngestorCluster(name string, namespace string) (*enterpriseApi.IngestorCluster, error) {
+ By("Expecting IngestorCluster custom resource to be retrieved successfully")
+
+ key := types.NamespacedName{
+ Name: name,
+ Namespace: namespace,
+ }
+ ic := &enterpriseApi.IngestorCluster{}
+
+ err := k8sClient.Get(context.Background(), key, ic)
+ if err != nil {
+ return nil, err
+ }
+
+ return ic, err
+}
+
+func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, os *enterpriseApi.ObjectStorage, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster {
+ By("Expecting IngestorCluster custom resource to be created successfully")
+
+ key := types.NamespacedName{
+ Name: name,
+ Namespace: namespace,
+ }
+ ingSpec := &enterpriseApi.IngestorCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ Annotations: annotations,
+ },
+ Spec: enterpriseApi.IngestorClusterSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ Spec: enterpriseApi.Spec{
+ ImagePullPolicy: "IfNotPresent",
+ },
+ },
+ Replicas: 3,
+ QueueRef: corev1.ObjectReference{
+ Name: queue.Name,
+ Namespace: queue.Namespace,
+ },
+ ObjectStorageRef: corev1.ObjectReference{
+ Name: os.Name,
+ Namespace: os.Namespace,
+ },
+ },
+ }
+
+ Expect(k8sClient.Create(context.Background(), ingSpec)).Should(Succeed())
+ time.Sleep(2 * time.Second)
+
+ ic := &enterpriseApi.IngestorCluster{}
+ Eventually(func() bool {
+ _ = k8sClient.Get(context.Background(), key, ic)
+ if status != "" {
+ fmt.Printf("status is set to %v", status)
+ ic.Status.Phase = status
+ Expect(k8sClient.Status().Update(context.Background(), ic)).Should(Succeed())
+ time.Sleep(2 * time.Second)
+ }
+ return true
+ }, timeout, interval).Should(BeTrue())
+
+ return ic
+}
+
+func UpdateIngestorCluster(instance *enterpriseApi.IngestorCluster, status enterpriseApi.Phase, os *enterpriseApi.ObjectStorage, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster {
+ By("Expecting IngestorCluster custom resource to be updated successfully")
+
+ key := types.NamespacedName{
+ Name: instance.Name,
+ Namespace: instance.Namespace,
+ }
+
+ icSpec := testutils.NewIngestorCluster(instance.Name, instance.Namespace, "image", os, queue)
+ icSpec.ResourceVersion = instance.ResourceVersion
+ Expect(k8sClient.Update(context.Background(), icSpec)).Should(Succeed())
+ time.Sleep(2 * time.Second)
+
+ ic := &enterpriseApi.IngestorCluster{}
+ Eventually(func() bool {
+ _ = k8sClient.Get(context.Background(), key, ic)
+ if status != "" {
+ fmt.Printf("status is set to %v", status)
+ ic.Status.Phase = status
+ Expect(k8sClient.Status().Update(context.Background(), ic)).Should(Succeed())
+ time.Sleep(2 * time.Second)
+ }
+ return true
+ }, timeout, interval).Should(BeTrue())
+
+ return ic
+}
+
+func DeleteIngestorCluster(name string, namespace string) {
+ By("Expecting IngestorCluster custom resource to be deleted successfully")
+
+ key := types.NamespacedName{
+ Name: name,
+ Namespace: namespace,
+ }
+
+ Eventually(func() error {
+ ic := &enterpriseApi.IngestorCluster{}
+ _ = k8sClient.Get(context.Background(), key, ic)
+ err := k8sClient.Delete(context.Background(), ic)
+ return err
+ }, timeout, interval).Should(Succeed())
+}
diff --git a/internal/controller/licensemanager_controller.go b/internal/controller/licensemanager_controller.go
index cb749a736..27e39a7f5 100644
--- a/internal/controller/licensemanager_controller.go
+++ b/internal/controller/licensemanager_controller.go
@@ -22,6 +22,7 @@ import (
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
"github.com/splunk/splunk-operator/internal/controller/common"
+ splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
"github.com/pkg/errors"
metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics"
@@ -30,6 +31,7 @@ import (
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -42,7 +44,8 @@ import (
// LicenseManagerReconciler reconciles a LicenseManager object
type LicenseManagerReconciler struct {
client.Client
- Scheme *runtime.Scheme
+ Scheme *runtime.Scheme
+ Recorder record.EventRecorder
}
//+kubebuilder:rbac:groups=enterprise.splunk.com,resources=licensemanagers,verbs=get;list;watch;create;update;patch;delete
@@ -102,6 +105,9 @@ func (r *LicenseManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque
reqLogger.Info("start", "CR version", instance.GetResourceVersion())
+ // Pass event recorder through context
+ ctx = context.WithValue(ctx, splcommon.EventRecorderKey, r.Recorder)
+
result, err := ApplyLicenseManager(ctx, r.Client, instance)
if result.Requeue && result.RequeueAfter != 0 {
reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second))
@@ -156,5 +162,6 @@ func (r *LicenseManagerReconciler) SetupWithManager(mgr ctrl.Manager) error {
WithOptions(controller.Options{
MaxConcurrentReconciles: enterpriseApi.TotalWorker,
}).
+ Named("license-manager-controller").
Complete(r)
}
diff --git a/internal/controller/licensemanager_controller_test.go b/internal/controller/licensemanager_controller_test.go
index 0ec97a639..4d95d6b5f 100644
--- a/internal/controller/licensemanager_controller_test.go
+++ b/internal/controller/licensemanager_controller_test.go
@@ -3,6 +3,7 @@ package controller
import (
"context"
"fmt"
+
"github.com/splunk/splunk-operator/internal/controller/testutils"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
diff --git a/internal/controller/licensemaster_controller.go b/internal/controller/licensemaster_controller.go
index 717632e33..c413cab50 100644
--- a/internal/controller/licensemaster_controller.go
+++ b/internal/controller/licensemaster_controller.go
@@ -21,6 +21,7 @@ import (
"time"
"github.com/splunk/splunk-operator/internal/controller/common"
+ splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
"github.com/pkg/errors"
enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
@@ -31,6 +32,7 @@ import (
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -43,7 +45,8 @@ import (
// LicenseMasterReconciler reconciles a LicenseMaster object
type LicenseMasterReconciler struct {
client.Client
- Scheme *runtime.Scheme
+ Scheme *runtime.Scheme
+ Recorder record.EventRecorder
}
//+kubebuilder:rbac:groups=enterprise.splunk.com,resources=licensemasters,verbs=get;list;watch;create;update;patch;delete
@@ -103,6 +106,9 @@ func (r *LicenseMasterReconciler) Reconcile(ctx context.Context, req ctrl.Reques
reqLogger.Info("start", "CR version", instance.GetResourceVersion())
+ // Pass event recorder through context
+ ctx = context.WithValue(ctx, splcommon.EventRecorderKey, r.Recorder)
+
result, err := ApplyLicenseMaster(ctx, r.Client, instance)
if result.Requeue && result.RequeueAfter != 0 {
reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second))
@@ -157,5 +163,6 @@ func (r *LicenseMasterReconciler) SetupWithManager(mgr ctrl.Manager) error {
WithOptions(controller.Options{
MaxConcurrentReconciles: enterpriseApi.TotalWorker,
}).
+ Named("license-master-controller").
Complete(r)
}
diff --git a/internal/controller/licensemaster_controller_test.go b/internal/controller/licensemaster_controller_test.go
index fc2dc7b7c..fdd967aa3 100644
--- a/internal/controller/licensemaster_controller_test.go
+++ b/internal/controller/licensemaster_controller_test.go
@@ -3,6 +3,7 @@ package controller
import (
"context"
"fmt"
+
"github.com/splunk/splunk-operator/internal/controller/testutils"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
diff --git a/internal/controller/monitoringconsole_controller.go b/internal/controller/monitoringconsole_controller.go
index 3767fcac2..571e0f9f8 100644
--- a/internal/controller/monitoringconsole_controller.go
+++ b/internal/controller/monitoringconsole_controller.go
@@ -22,6 +22,7 @@ import (
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
"github.com/splunk/splunk-operator/internal/controller/common"
+ splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
"github.com/pkg/errors"
enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
@@ -31,6 +32,7 @@ import (
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -43,7 +45,8 @@ import (
// MonitoringConsoleReconciler reconciles a MonitoringConsole object
type MonitoringConsoleReconciler struct {
client.Client
- Scheme *runtime.Scheme
+ Scheme *runtime.Scheme
+ Recorder record.EventRecorder
}
//+kubebuilder:rbac:groups=enterprise.splunk.com,resources=monitoringconsoles,verbs=get;list;watch;create;update;patch;delete
@@ -102,6 +105,9 @@ func (r *MonitoringConsoleReconciler) Reconcile(ctx context.Context, req ctrl.Re
reqLogger.Info("start", "CR version", instance.GetResourceVersion())
+ // Pass event recorder through context
+ ctx = context.WithValue(ctx, splcommon.EventRecorderKey, r.Recorder)
+
result, err := ApplyMonitoringConsole(ctx, r.Client, instance)
if result.Requeue && result.RequeueAfter != 0 {
reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second))
@@ -170,5 +176,6 @@ func (r *MonitoringConsoleReconciler) SetupWithManager(mgr ctrl.Manager) error {
WithOptions(controller.Options{
MaxConcurrentReconciles: enterpriseApi.TotalWorker,
}).
+ Named("monitoring-console-controller").
Complete(r)
}
diff --git a/internal/controller/monitoringconsole_controller_test.go b/internal/controller/monitoringconsole_controller_test.go
index bc5949d53..644f13da0 100644
--- a/internal/controller/monitoringconsole_controller_test.go
+++ b/internal/controller/monitoringconsole_controller_test.go
@@ -3,6 +3,7 @@ package controller
import (
"context"
"fmt"
+
"github.com/splunk/splunk-operator/internal/controller/testutils"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
diff --git a/internal/controller/searchheadcluster_controller.go b/internal/controller/searchheadcluster_controller.go
index 53d50fab9..9ff2eca36 100644
--- a/internal/controller/searchheadcluster_controller.go
+++ b/internal/controller/searchheadcluster_controller.go
@@ -22,6 +22,7 @@ import (
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
"github.com/splunk/splunk-operator/internal/controller/common"
+ splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
"github.com/pkg/errors"
metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics"
@@ -30,6 +31,7 @@ import (
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -42,7 +44,8 @@ import (
// SearchHeadClusterReconciler reconciles a SearchHeadCluster object
type SearchHeadClusterReconciler struct {
client.Client
- Scheme *runtime.Scheme
+ Scheme *runtime.Scheme
+ Recorder record.EventRecorder
}
//+kubebuilder:rbac:groups=enterprise.splunk.com,resources=searchheadclusters,verbs=get;list;watch;create;update;patch;delete
@@ -102,6 +105,9 @@ func (r *SearchHeadClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re
reqLogger.Info("start", "CR version", instance.GetResourceVersion())
+ // Pass event recorder through context
+ ctx = context.WithValue(ctx, splcommon.EventRecorderKey, r.Recorder)
+
result, err := ApplySearchHeadCluster(ctx, r.Client, instance)
if result.Requeue && result.RequeueAfter != 0 {
reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second))
@@ -155,5 +161,6 @@ func (r *SearchHeadClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
WithOptions(controller.Options{
MaxConcurrentReconciles: enterpriseApi.TotalWorker,
}).
+ Named("search-head-cluster-controller").
Complete(r)
}
diff --git a/internal/controller/searchheadcluster_controller_test.go b/internal/controller/searchheadcluster_controller_test.go
index 2e764909c..983849237 100644
--- a/internal/controller/searchheadcluster_controller_test.go
+++ b/internal/controller/searchheadcluster_controller_test.go
@@ -3,6 +3,7 @@ package controller
import (
"context"
"fmt"
+
"github.com/splunk/splunk-operator/internal/controller/testutils"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
diff --git a/internal/controller/standalone_controller.go b/internal/controller/standalone_controller.go
index 93e85b7f0..bb7106f05 100644
--- a/internal/controller/standalone_controller.go
+++ b/internal/controller/standalone_controller.go
@@ -22,9 +22,11 @@ import (
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
"github.com/splunk/splunk-operator/internal/controller/common"
+ splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -47,7 +49,8 @@ const (
// StandaloneReconciler reconciles a Standalone object
type StandaloneReconciler struct {
client.Client
- Scheme *runtime.Scheme
+ Scheme *runtime.Scheme
+ Recorder record.EventRecorder
}
//+kubebuilder:rbac:groups=enterprise.splunk.com,resources=standalones,verbs=get;list;watch;create;update;patch;delete
@@ -107,6 +110,9 @@ func (r *StandaloneReconciler) Reconcile(ctx context.Context, req ctrl.Request)
reqLogger.Info("start", "CR version", instance.GetResourceVersion())
+ // Pass event recorder through context
+ ctx = context.WithValue(ctx, splcommon.EventRecorderKey, r.Recorder)
+
result, err := ApplyStandalone(ctx, r.Client, instance)
if result.Requeue && result.RequeueAfter != 0 {
reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second))
@@ -161,5 +167,6 @@ func (r *StandaloneReconciler) SetupWithManager(mgr ctrl.Manager) error {
WithOptions(controller.Options{
MaxConcurrentReconciles: enterpriseApi.TotalWorker,
}).
+ Named("standalone-controller").
Complete(r)
}
diff --git a/internal/controller/standalone_controller_test.go b/internal/controller/standalone_controller_test.go
index 68b0cdb48..d7c4ca842 100644
--- a/internal/controller/standalone_controller_test.go
+++ b/internal/controller/standalone_controller_test.go
@@ -3,6 +3,7 @@ package controller
import (
"context"
"fmt"
+
"github.com/splunk/splunk-operator/internal/controller/testutils"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go
index be2c1a50f..142a8720c 100644
--- a/internal/controller/suite_test.go
+++ b/internal/controller/suite_test.go
@@ -50,7 +50,6 @@ func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Controller Suite")
-
}
var _ = BeforeSuite(func(ctx context.Context) {
@@ -117,6 +116,12 @@ var _ = BeforeSuite(func(ctx context.Context) {
}).SetupWithManager(k8sManager); err != nil {
Expect(err).NotTo(HaveOccurred())
}
+ if err := (&IngestorClusterReconciler{
+ Client: k8sManager.GetClient(),
+ Scheme: k8sManager.GetScheme(),
+ }).SetupWithManager(k8sManager); err != nil {
+ Expect(err).NotTo(HaveOccurred())
+ }
if err := (&LicenseManagerReconciler{
Client: k8sManager.GetClient(),
Scheme: k8sManager.GetScheme(),
diff --git a/internal/controller/telemetry_controller.go b/internal/controller/telemetry_controller.go
new file mode 100644
index 000000000..7214f67cc
--- /dev/null
+++ b/internal/controller/telemetry_controller.go
@@ -0,0 +1,115 @@
+/*
+Copyright (c) 2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package controller
+
+import (
+ "context"
+ "fmt"
+ enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "time"
+
+ metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics"
+
+ corev1 "k8s.io/api/core/v1"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/predicate"
+)
+
+const (
+	// Below two constants are defined in kustomization*.yaml
+ ConfigMapNamePrefix = "splunk-operator-"
+ ConfigMapLabelName = "splunk-operator"
+
+ telemetryRetryDelay = time.Second * 600
+)
+
+var applyTelemetryFn = enterprise.ApplyTelemetry
+
+type TelemetryReconciler struct {
+ client.Client
+ Scheme *runtime.Scheme
+}
+
+//+kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch
+
+func (r *TelemetryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+ metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "Telemetry")).Inc()
+ defer recordInstrumentionData(time.Now(), req, "controller", "Telemetry")
+
+ reqLogger := log.FromContext(ctx)
+ reqLogger = reqLogger.WithValues("telemetry", req.NamespacedName)
+
+ reqLogger.Info("Reconciling telemetry")
+
+ defer func() {
+ if rec := recover(); rec != nil {
+ reqLogger.Error(fmt.Errorf("panic: %v", rec), "Recovered from panic in TelemetryReconciler.Reconcile")
+ }
+ }()
+
+ // Fetch the ConfigMap
+ cm := &corev1.ConfigMap{}
+ err := r.Get(ctx, req.NamespacedName, cm)
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ reqLogger.Info("telemetry configmap not found; requeueing", "period(seconds)", int(telemetryRetryDelay/time.Second))
+ return ctrl.Result{Requeue: true, RequeueAfter: telemetryRetryDelay}, nil
+ }
+ reqLogger.Error(err, "could not load telemetry configmap; requeueing", "period(seconds)", int(telemetryRetryDelay/time.Second))
+ return ctrl.Result{Requeue: true, RequeueAfter: telemetryRetryDelay}, nil
+ }
+
+ if len(cm.Data) == 0 {
+ reqLogger.Info("telemetry configmap has no data keys")
+ return ctrl.Result{Requeue: true, RequeueAfter: telemetryRetryDelay}, nil
+ }
+
+ reqLogger.Info("start", "Telemetry configmap version", cm.GetResourceVersion())
+
+ result, err := applyTelemetryFn(ctx, r.Client, cm)
+ if err != nil {
+ reqLogger.Error(err, "Failed to send telemetry")
+ return ctrl.Result{Requeue: true, RequeueAfter: telemetryRetryDelay}, nil
+ }
+ if result.Requeue && result.RequeueAfter != 0 {
+ reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second))
+ }
+
+ return result, err
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *TelemetryReconciler) SetupWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewControllerManagedBy(mgr).
+ For(&corev1.ConfigMap{}).
+ WithEventFilter(predicate.NewPredicateFuncs(func(obj client.Object) bool {
+ labels := obj.GetLabels()
+ if labels == nil {
+ return false
+ }
+ return obj.GetName() == enterprise.GetTelemetryConfigMapName(ConfigMapNamePrefix) && labels["name"] == ConfigMapLabelName
+ })).
+ WithOptions(controller.Options{
+ MaxConcurrentReconciles: 1,
+ }).
+ Complete(r)
+}
diff --git a/internal/controller/telemetry_controller_test.go b/internal/controller/telemetry_controller_test.go
new file mode 100644
index 000000000..4760a3ace
--- /dev/null
+++ b/internal/controller/telemetry_controller_test.go
@@ -0,0 +1,321 @@
+package controller
+
+/*
+Copyright (c) 2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/kubernetes/scheme"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+ splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
+)
+
+var _ = Describe("Telemetry Controller", func() {
+ var (
+ ctx context.Context
+ cmName = "splunk-operator-telemetry"
+ ns = "test-telemetry-ns"
+ labels = map[string]string{"name": "splunk-operator"}
+ )
+
+ BeforeEach(func() {
+ ctx = context.TODO()
+ })
+
+ It("Reconcile returns requeue when ConfigMap not found", func() {
+ builder := fake.NewClientBuilder().WithScheme(scheme.Scheme)
+ c := builder.Build()
+ r := &TelemetryReconciler{Client: c, Scheme: scheme.Scheme}
+ req := reconcile.Request{NamespacedName: types.NamespacedName{Name: cmName, Namespace: ns}}
+ result, err := r.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ Expect(result.Requeue).To(BeTrue())
+ Expect(result.RequeueAfter).To(Equal(time.Second * 600))
+ })
+
+ It("Reconcile returns requeue when ConfigMap has no data", func() {
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Name: cmName, Namespace: ns, Labels: labels},
+ Data: map[string]string{},
+ }
+ builder := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(cm)
+ c := builder.Build()
+ r := &TelemetryReconciler{Client: c, Scheme: scheme.Scheme}
+ req := reconcile.Request{NamespacedName: types.NamespacedName{Name: cmName, Namespace: ns}}
+ result, err := r.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ Expect(result.Requeue).To(BeTrue())
+ Expect(result.RequeueAfter).To(Equal(time.Second * 600))
+ })
+
+ It("Reconcile returns requeue when ConfigMap has data and ApplyTelemetry returns error", func() {
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Name: cmName, Namespace: ns, Labels: labels},
+ Data: map[string]string{"foo": "bar"},
+ }
+ builder := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(cm)
+ c := builder.Build()
+ r := &TelemetryReconciler{Client: c, Scheme: scheme.Scheme}
+ req := reconcile.Request{NamespacedName: types.NamespacedName{Name: cmName, Namespace: ns}}
+
+ // Patch applyTelemetryFn to return error
+ origApply := applyTelemetryFn
+ defer func() { applyTelemetryFn = origApply }()
+ applyTelemetryFn = func(ctx context.Context, client splcommon.ControllerClient, cm *corev1.ConfigMap) (reconcile.Result, error) {
+ return reconcile.Result{}, fmt.Errorf("fake error")
+ }
+
+ result, err := r.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ Expect(result.Requeue).To(BeTrue())
+ Expect(result.RequeueAfter).To(Equal(time.Second * 600))
+ })
+
+ It("Reconcile returns result from ApplyTelemetry when ConfigMap has data and ApplyTelemetry returns requeue", func() {
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Name: cmName, Namespace: ns, Labels: labels},
+ Data: map[string]string{"foo": "bar"},
+ }
+ builder := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(cm)
+ c := builder.Build()
+ r := &TelemetryReconciler{Client: c, Scheme: scheme.Scheme}
+ req := reconcile.Request{NamespacedName: types.NamespacedName{Name: cmName, Namespace: ns}}
+
+ // Patch applyTelemetryFn to return a requeue result
+ origApply := applyTelemetryFn
+ defer func() { applyTelemetryFn = origApply }()
+ applyTelemetryFn = func(ctx context.Context, client splcommon.ControllerClient, cm *corev1.ConfigMap) (reconcile.Result, error) {
+ return reconcile.Result{Requeue: true, RequeueAfter: time.Second * 600}, nil
+ }
+
+ result, err := r.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ Expect(result.Requeue).To(BeTrue())
+ Expect(result.RequeueAfter).To(Equal(time.Second * 600))
+ })
+
+ It("Reconcile returns success when ApplyTelemetry returns no requeue", func() {
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Name: cmName, Namespace: ns, Labels: labels},
+ Data: map[string]string{"foo": "bar"},
+ }
+ builder := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(cm)
+ c := builder.Build()
+ r := &TelemetryReconciler{Client: c, Scheme: scheme.Scheme}
+ req := reconcile.Request{NamespacedName: types.NamespacedName{Name: cmName, Namespace: ns}}
+
+ origApply := applyTelemetryFn
+ defer func() { applyTelemetryFn = origApply }()
+ applyTelemetryFn = func(ctx context.Context, client splcommon.ControllerClient, cm *corev1.ConfigMap) (reconcile.Result, error) {
+ return reconcile.Result{Requeue: false, RequeueAfter: 0}, nil
+ }
+
+ result, err := r.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ Expect(result.Requeue).To(BeFalse())
+ Expect(result.RequeueAfter).To(Equal(time.Duration(0)))
+ })
+
+ It("Reconcile returns requeue when r.Get returns error (not NotFound)", func() {
+ r := &TelemetryReconciler{Client: &errorClient{}, Scheme: scheme.Scheme}
+ req := reconcile.Request{NamespacedName: types.NamespacedName{Name: cmName, Namespace: ns}}
+ result, err := r.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ Expect(result.Requeue).To(BeTrue())
+ Expect(result.RequeueAfter).To(Equal(time.Second * 600))
+ })
+
+ It("Reconcile recovers from panic in ApplyTelemetry", func() {
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Name: cmName, Namespace: ns, Labels: labels},
+ Data: map[string]string{"foo": "bar"},
+ }
+ builder := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(cm)
+ c := builder.Build()
+ r := &TelemetryReconciler{Client: c, Scheme: scheme.Scheme}
+ req := reconcile.Request{NamespacedName: types.NamespacedName{Name: cmName, Namespace: ns}}
+
+ // Patch applyTelemetryFn to panic
+ origApply := applyTelemetryFn
+ defer func() { applyTelemetryFn = origApply }()
+ applyTelemetryFn = func(ctx context.Context, client splcommon.ControllerClient, cm *corev1.ConfigMap) (reconcile.Result, error) {
+ panic("test panic")
+ }
+
+ // Should not panic, should recover and return requeue
+ Expect(func() {
+ _, err := r.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ }).NotTo(Panic())
+ })
+
+ It("Reconcile recovers from panic in r.Get", func() {
+ r := &TelemetryReconciler{Client: &panicClient{}, Scheme: scheme.Scheme}
+ req := reconcile.Request{NamespacedName: types.NamespacedName{Name: cmName, Namespace: ns}}
+ Expect(func() {
+ _, err := r.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ }).NotTo(Panic())
+ })
+
+ It("Reconcile returns requeue when ApplyTelemetry returns Requeue=true and RequeueAfter=0", func() {
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Name: cmName, Namespace: ns, Labels: labels},
+ Data: map[string]string{"foo": "bar"},
+ }
+ builder := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(cm)
+ c := builder.Build()
+ r := &TelemetryReconciler{Client: c, Scheme: scheme.Scheme}
+ req := reconcile.Request{NamespacedName: types.NamespacedName{Name: cmName, Namespace: ns}}
+ origApply := applyTelemetryFn
+ defer func() { applyTelemetryFn = origApply }()
+ applyTelemetryFn = func(ctx context.Context, client splcommon.ControllerClient, cm *corev1.ConfigMap) (reconcile.Result, error) {
+ return reconcile.Result{Requeue: true, RequeueAfter: 0}, nil
+ }
+ result, err := r.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ Expect(result.Requeue).To(BeTrue())
+ Expect(result.RequeueAfter).To(Equal(time.Duration(0)))
+ })
+
+ It("Reconcile returns result when ApplyTelemetry returns Requeue=false and RequeueAfter>0", func() {
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Name: cmName, Namespace: ns, Labels: labels},
+ Data: map[string]string{"foo": "bar"},
+ }
+ builder := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(cm)
+ c := builder.Build()
+ r := &TelemetryReconciler{Client: c, Scheme: scheme.Scheme}
+ req := reconcile.Request{NamespacedName: types.NamespacedName{Name: cmName, Namespace: ns}}
+ origApply := applyTelemetryFn
+ defer func() { applyTelemetryFn = origApply }()
+ applyTelemetryFn = func(ctx context.Context, client splcommon.ControllerClient, cm *corev1.ConfigMap) (reconcile.Result, error) {
+ return reconcile.Result{Requeue: false, RequeueAfter: time.Second * 123}, nil
+ }
+ result, err := r.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ Expect(result.Requeue).To(BeFalse())
+ Expect(result.RequeueAfter).To(Equal(time.Second * 123))
+ })
+
+ It("Reconcile returns requeue when ApplyTelemetry returns error and result with Requeue=false", func() {
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Name: cmName, Namespace: ns, Labels: labels},
+ Data: map[string]string{"foo": "bar"},
+ }
+ builder := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(cm)
+ c := builder.Build()
+ r := &TelemetryReconciler{Client: c, Scheme: scheme.Scheme}
+ req := reconcile.Request{NamespacedName: types.NamespacedName{Name: cmName, Namespace: ns}}
+ origApply := applyTelemetryFn
+ defer func() { applyTelemetryFn = origApply }()
+ applyTelemetryFn = func(ctx context.Context, client splcommon.ControllerClient, cm *corev1.ConfigMap) (reconcile.Result, error) {
+ return reconcile.Result{Requeue: false, RequeueAfter: 0}, fmt.Errorf("some error")
+ }
+ result, err := r.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ Expect(result.Requeue).To(BeTrue())
+ Expect(result.RequeueAfter).To(Equal(time.Second * 600))
+ })
+
+ It("Reconcile returns requeue when ApplyTelemetry returns error and result with Requeue=true and RequeueAfter>0", func() {
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Name: cmName, Namespace: ns, Labels: labels},
+ Data: map[string]string{"foo": "bar"},
+ }
+ builder := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(cm)
+ c := builder.Build()
+ r := &TelemetryReconciler{Client: c, Scheme: scheme.Scheme}
+ req := reconcile.Request{NamespacedName: types.NamespacedName{Name: cmName, Namespace: ns}}
+ origApply := applyTelemetryFn
+ defer func() { applyTelemetryFn = origApply }()
+ applyTelemetryFn = func(ctx context.Context, client splcommon.ControllerClient, cm *corev1.ConfigMap) (reconcile.Result, error) {
+ return reconcile.Result{Requeue: true, RequeueAfter: time.Second * 42}, fmt.Errorf("some error")
+ }
+ result, err := r.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ Expect(result.Requeue).To(BeTrue())
+ Expect(result.RequeueAfter).To(Equal(time.Second * 600))
+ })
+
+ It("Reconcile returns result when ApplyTelemetry returns nil result and nil error", func() {
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Name: cmName, Namespace: ns, Labels: labels},
+ Data: map[string]string{"foo": "bar"},
+ }
+ builder := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(cm)
+ c := builder.Build()
+ r := &TelemetryReconciler{Client: c, Scheme: scheme.Scheme}
+ req := reconcile.Request{NamespacedName: types.NamespacedName{Name: cmName, Namespace: ns}}
+ origApply := applyTelemetryFn
+ defer func() { applyTelemetryFn = origApply }()
+ applyTelemetryFn = func(ctx context.Context, client splcommon.ControllerClient, cm *corev1.ConfigMap) (reconcile.Result, error) {
+ return reconcile.Result{}, nil
+ }
+ result, err := r.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ Expect(result.Requeue).To(BeFalse())
+ Expect(result.RequeueAfter).To(Equal(time.Duration(0)))
+ })
+
+ It("Reconcile returns requeue when ApplyTelemetry returns nil result and non-nil error", func() {
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Name: cmName, Namespace: ns, Labels: labels},
+ Data: map[string]string{"foo": "bar"},
+ }
+ builder := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(cm)
+ c := builder.Build()
+ r := &TelemetryReconciler{Client: c, Scheme: scheme.Scheme}
+ req := reconcile.Request{NamespacedName: types.NamespacedName{Name: cmName, Namespace: ns}}
+ origApply := applyTelemetryFn
+ defer func() { applyTelemetryFn = origApply }()
+ applyTelemetryFn = func(ctx context.Context, client splcommon.ControllerClient, cm *corev1.ConfigMap) (reconcile.Result, error) {
+ return reconcile.Result{}, fmt.Errorf("some error")
+ }
+ result, err := r.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ Expect(result.Requeue).To(BeTrue())
+ Expect(result.RequeueAfter).To(Equal(time.Second * 600))
+ })
+})
+
+// errorClient is a fake client whose Get always returns an error, used to exercise Reconcile's error path
+type errorClient struct {
+ client.Client
+}
+
+func (e *errorClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
+ return fmt.Errorf("some error")
+}
+
+type panicClient struct {
+ client.Client
+}
+
+func (p *panicClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
+ panic("test panic")
+}
diff --git a/internal/controller/testutils/new.go b/internal/controller/testutils/new.go
index 50ec481cb..4e657968f 100644
--- a/internal/controller/testutils/new.go
+++ b/internal/controller/testutils/new.go
@@ -45,6 +45,43 @@ func NewStandalone(name, ns, image string) *enterpriseApi.Standalone {
return ad
}
+// NewIngestorCluster returns a new IngestorCluster test instance referencing the given queue and object storage
+func NewIngestorCluster(name, ns, image string, os *enterpriseApi.ObjectStorage, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster {
+ return &enterpriseApi.IngestorCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns},
+ Spec: enterpriseApi.IngestorClusterSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ Spec: enterpriseApi.Spec{ImagePullPolicy: string(pullPolicy)},
+ },
+ Replicas: 3,
+ QueueRef: corev1.ObjectReference{
+ Name: queue.Name,
+ Namespace: queue.Namespace,
+ },
+ ObjectStorageRef: corev1.ObjectReference{
+ Name: os.Name,
+ Namespace: os.Namespace,
+ },
+ },
+ }
+}
+
+// NewQueue returns a new Queue instance with the given spec
+func NewQueue(name, ns string, spec enterpriseApi.QueueSpec) *enterpriseApi.Queue {
+ return &enterpriseApi.Queue{
+ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns},
+ Spec: spec,
+ }
+}
+
+// NewObjectStorage returns a new ObjectStorage instance with the given spec
+func NewObjectStorage(name, ns string, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage {
+ return &enterpriseApi.ObjectStorage{
+ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns},
+ Spec: spec,
+ }
+}
+
// NewSearchHeadCluster returns new serach head cluster instance with its config hash
func NewSearchHeadCluster(name, ns, image string) *enterpriseApi.SearchHeadCluster {
diff --git a/kuttl/tests/helm/index-and-ingest-separation/00-install-operator.yaml b/kuttl/tests/helm/index-and-ingest-separation/00-install-operator.yaml
new file mode 100644
index 000000000..602ebe0c1
--- /dev/null
+++ b/kuttl/tests/helm/index-and-ingest-separation/00-install-operator.yaml
@@ -0,0 +1,6 @@
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: ../script/installoperator.sh
+ background: false
\ No newline at end of file
diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml
new file mode 100644
index 000000000..a4aaa0824
--- /dev/null
+++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml
@@ -0,0 +1,5 @@
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: index-ing-sep-secret
diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml
new file mode 100644
index 000000000..591aa8fd5
--- /dev/null
+++ b/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml
@@ -0,0 +1,7 @@
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: kubectl create secret generic index-ing-sep-secret --from-literal=s3_access_key=$AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID --from-literal=s3_secret_key=$AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY --namespace $NAMESPACE
+ background: false
+ skipLogOutput: true
\ No newline at end of file
diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml
new file mode 100644
index 000000000..99669acf1
--- /dev/null
+++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml
@@ -0,0 +1,112 @@
+---
+# assert for queue custom resource to be ready
+apiVersion: enterprise.splunk.com/v4
+kind: Queue
+metadata:
+ name: queue
+spec:
+ provider: sqs
+ sqs:
+ name: index-ingest-separation-test-q
+ authRegion: us-west-2
+ endpoint: https://sqs.us-west-2.amazonaws.com
+ dlq: index-ingest-separation-test-dlq
+
+---
+# assert for object storage custom resource to be ready
+apiVersion: enterprise.splunk.com/v4
+kind: ObjectStorage
+metadata:
+ name: os
+spec:
+ provider: s3
+ s3:
+ endpoint: https://s3.us-west-2.amazonaws.com
+ path: index-ingest-separation-test-bucket/smartbus-test
+
+---
+# assert for cluster manager custom resource to be ready
+apiVersion: enterprise.splunk.com/v4
+kind: ClusterManager
+metadata:
+ name: cm
+status:
+ phase: Ready
+
+---
+# check if stateful sets are created
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: splunk-cm-cluster-manager
+status:
+ replicas: 1
+
+---
+# check if secret object are created
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-cm-cluster-manager-secret-v1
+
+---
+# assert for indexer cluster custom resource to be ready
+apiVersion: enterprise.splunk.com/v4
+kind: IndexerCluster
+metadata:
+ name: indexer
+spec:
+ replicas: 3
+ queueRef:
+ name: queue
+ objectStorageRef:
+ name: os
+status:
+ phase: Ready
+
+---
+# check for stateful set and replicas as configured
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: splunk-indexer-indexer
+status:
+ replicas: 3
+
+---
+# check if secret object are created
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-indexer-indexer-secret-v1
+
+---
+# assert for ingestor cluster custom resource to be ready
+apiVersion: enterprise.splunk.com/v4
+kind: IngestorCluster
+metadata:
+ name: ingestor
+spec:
+ replicas: 3
+ queueRef:
+ name: queue
+ objectStorageRef:
+ name: os
+status:
+ phase: Ready
+
+---
+# check for stateful set and replicas as configured
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: splunk-ingestor-ingestor
+status:
+ replicas: 3
+
+---
+# check if secret object are created
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-ingestor-ingestor-secret-v1
\ No newline at end of file
diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-install-setup.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-install-setup.yaml
new file mode 100644
index 000000000..0e9f5d58e
--- /dev/null
+++ b/kuttl/tests/helm/index-and-ingest-separation/02-install-setup.yaml
@@ -0,0 +1,6 @@
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - command: helm install splunk-index-ingest-sep $HELM_REPO_PATH/splunk-enterprise -f splunk_index_ingest_sep.yaml
+ namespaced: true
\ No newline at end of file
diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml
new file mode 100644
index 000000000..8bf619148
--- /dev/null
+++ b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml
@@ -0,0 +1,23 @@
+---
+# assert for ingestor cluster custom resource to be ready
+apiVersion: enterprise.splunk.com/v4
+kind: IngestorCluster
+metadata:
+ name: ingestor
+spec:
+ replicas: 4
+ queueRef:
+ name: queue
+ objectStorageRef:
+ name: os
+status:
+ phase: Ready
+
+---
+# check for stateful sets and replicas updated
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: splunk-ingestor-ingestor
+status:
+ replicas: 4
diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-scaleup-ingestor.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-scaleup-ingestor.yaml
new file mode 100644
index 000000000..731faf145
--- /dev/null
+++ b/kuttl/tests/helm/index-and-ingest-separation/03-scaleup-ingestor.yaml
@@ -0,0 +1,5 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - command: helm upgrade splunk-index-ingest-sep $HELM_REPO_PATH/splunk-enterprise --reuse-values --set ingestorCluster.replicaCount=4
+ namespaced: true
diff --git a/kuttl/tests/helm/index-and-ingest-separation/04-uninstall-setup.yaml b/kuttl/tests/helm/index-and-ingest-separation/04-uninstall-setup.yaml
new file mode 100644
index 000000000..85bf05dfe
--- /dev/null
+++ b/kuttl/tests/helm/index-and-ingest-separation/04-uninstall-setup.yaml
@@ -0,0 +1,5 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - command: helm uninstall splunk-index-ingest-sep
+ namespaced: true
diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml
new file mode 100644
index 000000000..8c733e53b
--- /dev/null
+++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml
@@ -0,0 +1,52 @@
+splunk-operator:
+ enabled: false
+ splunkOperator:
+ clusterWideAccess: false
+ persistentVolumeClaim:
+ storageClassName: gp2
+
+queue:
+ enabled: true
+ name: queue
+ provider: sqs
+ sqs:
+ name: index-ingest-separation-test-q
+ authRegion: us-west-2
+ endpoint: https://sqs.us-west-2.amazonaws.com
+ dlq: index-ingest-separation-test-dlq
+ volumes:
+ - name: helm-bus-secret-ref-test
+ secretRef: index-ing-sep-secret
+
+objectStorage:
+ enabled: true
+ name: os
+ provider: s3
+ s3:
+ endpoint: https://s3.us-west-2.amazonaws.com
+ path: index-ingest-separation-test-bucket/smartbus-test
+
+ingestorCluster:
+ enabled: true
+ name: ingestor
+ replicaCount: 3
+ queueRef:
+ name: queue
+ objectStorageRef:
+ name: os
+
+clusterManager:
+ enabled: true
+ name: cm
+ replicaCount: 1
+
+indexerCluster:
+ enabled: true
+ name: indexer
+ replicaCount: 3
+ clusterManagerRef:
+ name: cm
+ queueRef:
+ name: queue
+ objectStorageRef:
+ name: os
diff --git a/pkg/splunk/client/awss3client.go b/pkg/splunk/client/awss3client.go
index dab7f368a..b35d989c8 100644
--- a/pkg/splunk/client/awss3client.go
+++ b/pkg/splunk/client/awss3client.go
@@ -62,7 +62,7 @@ type AWSS3Client struct {
Downloader SplunkAWSDownloadClient
}
-var regionRegex = ".*.s3[-,.]([a-z]+-[a-z]+-[0-9]+)\\..*amazonaws.com"
+var regionRegex = ".*.s3[-,.]([a-z]+-[a-z]+(?:-[a-z]+)?-[0-9]+)\\..*amazonaws\\.com"
// GetRegion extracts the region from the endpoint field
func GetRegion(ctx context.Context, endpoint string, region *string) error {
diff --git a/pkg/splunk/client/awss3client_test.go b/pkg/splunk/client/awss3client_test.go
index a6af8efa2..5df04182c 100644
--- a/pkg/splunk/client/awss3client_test.go
+++ b/pkg/splunk/client/awss3client_test.go
@@ -69,6 +69,101 @@ func TestGetTLSVersion(t *testing.T) {
getTLSVersion(&tr)
}
}
+
+func TestGetRegion(t *testing.T) {
+ ctx := context.TODO()
+
+ tests := []struct {
+ name string
+ endpoint string
+ expectedRegion string
+ expectError bool
+ }{
+ {
+ name: "Standard 3-part region - us-west-2",
+ endpoint: "https://s3.us-west-2.amazonaws.com",
+ expectedRegion: "us-west-2",
+ expectError: false,
+ },
+ {
+ name: "Standard 3-part region - eu-west-1",
+ endpoint: "https://s3-eu-west-1.amazonaws.com",
+ expectedRegion: "eu-west-1",
+ expectError: false,
+ },
+ {
+ name: "GovCloud 4-part region - us-gov-west-1",
+ endpoint: "https://s3.us-gov-west-1.amazonaws.com",
+ expectedRegion: "us-gov-west-1",
+ expectError: false,
+ },
+ {
+ name: "GovCloud 4-part region - us-gov-east-1",
+ endpoint: "https://s3-us-gov-east-1.amazonaws.com",
+ expectedRegion: "us-gov-east-1",
+ expectError: false,
+ },
+ {
+ name: "ISO region - us-iso-east-1",
+ endpoint: "https://s3.us-iso-east-1.amazonaws.com",
+ expectedRegion: "us-iso-east-1",
+ expectError: false,
+ },
+ {
+ name: "ISOB region - us-isob-east-1",
+ endpoint: "https://s3.us-isob-east-1.amazonaws.com",
+ expectedRegion: "us-isob-east-1",
+ expectError: false,
+ },
+ {
+ name: "With bucket prefix",
+ endpoint: "https://mybucket.s3.us-west-2.amazonaws.com",
+ expectedRegion: "us-west-2",
+ expectError: false,
+ },
+ {
+ name: "GovCloud with bucket prefix",
+ endpoint: "https://mybucket.s3.us-gov-west-1.amazonaws.com",
+ expectedRegion: "us-gov-west-1",
+ expectError: false,
+ },
+ {
+ name: "Invalid endpoint - no region",
+ endpoint: "https://s3.amazonaws.com",
+ expectError: true,
+ },
+ {
+ name: "Invalid endpoint - non-AWS",
+ endpoint: "https://storage.googleapis.com",
+ expectError: true,
+ },
+ {
+ name: "Invalid endpoint - malformed domain",
+ endpoint: "https://s3.us-west-2.amazonawsXcom",
+ expectError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var region string
+ err := GetRegion(ctx, tt.endpoint, &region)
+
+ if tt.expectError {
+ if err == nil {
+ t.Errorf("Expected error for endpoint %s, but got none", tt.endpoint)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Unexpected error for endpoint %s: %v", tt.endpoint, err)
+ }
+ if region != tt.expectedRegion {
+ t.Errorf("Expected region %s, got %s for endpoint %s", tt.expectedRegion, region, tt.endpoint)
+ }
+ }
+ })
+ }
+}
func TestNewAWSS3Client(t *testing.T) {
ctx := context.TODO()
fn := InitAWSClientWrapper
diff --git a/pkg/splunk/client/doc.go b/pkg/splunk/client/doc.go
index 4426b855c..2521b657e 100644
--- a/pkg/splunk/client/doc.go
+++ b/pkg/splunk/client/doc.go
@@ -15,6 +15,6 @@
/*
Package client provides a simple client for the Splunk Enterprise REST API.
-This package has no depedencies outside of the standard go library.
+This package has no dependencies outside of the standard go library.
*/
package client
diff --git a/pkg/splunk/client/enterprise.go b/pkg/splunk/client/enterprise.go
index 8bc36b08a..9025102b1 100644
--- a/pkg/splunk/client/enterprise.go
+++ b/pkg/splunk/client/enterprise.go
@@ -16,10 +16,13 @@
package client
import (
+ "bytes"
+ "context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
+ "log/slog"
"net/http"
"regexp"
"strconv"
@@ -788,7 +791,7 @@ func (c *SplunkClient) AutomateMCApplyChanges() error {
return err
}
-// GetMonitoringconsoleServerRoles to retrive server roles of the local host or SplunkMonitoringConsole
+// GetMonitoringconsoleServerRoles to retrieve server roles of the local host or SplunkMonitoringConsole
func (c *SplunkClient) GetMonitoringconsoleServerRoles() (*MCServerRolesInfo, error) {
apiResponseServerRoles := struct {
Entry []struct {
@@ -954,6 +957,70 @@ func (c *SplunkClient) SetIdxcSecret(idxcSecret string) error {
return c.Do(request, expectedStatus, nil)
}
+type TelemetryResponse struct {
+ Message string `json:"message"`
+ MetricValueID string `json:"metricValueId"`
+}
+
+func (c *SplunkClient) SendTelemetry(path string, body []byte) (*TelemetryResponse, error) {
+ endpoint := fmt.Sprintf("%s%s", c.ManagementURI, path)
+ request, err := http.NewRequest("POST", endpoint, bytes.NewReader(body))
+ if err != nil {
+ return nil, err
+ }
+ request.Header.Set("Content-Type", "application/json")
+ expectedStatus := []int{201}
+ var response TelemetryResponse
+
+ err = c.Do(request, expectedStatus, &response)
+ if err != nil {
+ return nil, err
+ }
+ return &response, nil
+}
+
+// LicenseInfo represents license information from Splunk
+type LicenseInfo struct {
+ Title string `json:"title"`
+ Status string `json:"status"`
+ ExpirationTime int64 `json:"expiration_time"`
+ ID string `json:"guid"`
+ Type string `json:"type"`
+}
+
+// LicenseResponse represents the API response from /services/licenser/licenses
+type LicenseResponse struct {
+ Entry []struct {
+ Name string `json:"name"`
+ Content LicenseInfo `json:"content"`
+ } `json:"entry"`
+}
+
+// GetLicenseInfo retrieves license information from Splunk instance
+// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTlicense#licenser.2Flicenses
+func (c *SplunkClient) GetLicenseInfo() (map[string]LicenseInfo, error) {
+ endpoint := fmt.Sprintf("%s/services/licenser/licenses?output_mode=json", c.ManagementURI)
+ request, err := http.NewRequest("GET", endpoint, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var response LicenseResponse
+ expectedStatus := []int{200}
+ err = c.Do(request, expectedStatus, &response)
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert response to map
+ licenses := make(map[string]LicenseInfo)
+ for _, entry := range response.Entry {
+ licenses[entry.Name] = entry.Content
+ }
+
+ return licenses, nil
+}
+
// RestartSplunk restarts specific Splunk instance
// Can be used for any Splunk Instance
// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTsystem#server.2Fcontrol.2Frestart
@@ -966,3 +1033,49 @@ func (c *SplunkClient) RestartSplunk() error {
expectedStatus := []int{200}
return c.Do(request, expectedStatus, nil)
}
+
+// UpdateConfFile creates a conf file object if needed and updates its properties
+// See https://help.splunk.com/en/splunk-enterprise/leverage-rest-apis/rest-api-reference/10.0/configuration-endpoints/configuration-endpoint-descriptions
+func (c *SplunkClient) UpdateConfFile(ctx context.Context, logger *slog.Logger, fileName, property string, propertyKVList [][]string) error {
+ // Creates an object in a conf file if it doesn't exist
+ endpoint := fmt.Sprintf("%s/servicesNS/nobody/system/configs/conf-%s", c.ManagementURI, fileName)
+ body := fmt.Sprintf("name=%s", property)
+
+ logger.InfoContext(ctx, "Creating conf file object if it does not exist", "fileName", fileName, "property", property)
+ request, err := http.NewRequest("POST", endpoint, strings.NewReader(body))
+ if err != nil {
+ logger.ErrorContext(ctx, "Failed to create conf file object if it does not exist", "fileName", fileName, "property", property, "error", err.Error())
+ return err
+ }
+
+ expectedStatus := []int{200, 201, 409}
+ err = c.Do(request, expectedStatus, nil)
+ if err != nil {
+ logger.ErrorContext(ctx, fmt.Sprintf("Status not in %v for conf file object creation", expectedStatus), "fileName", fileName, "property", property, "error", err.Error())
+ return err
+ }
+
+ // Updates a property of an object in a conf file
+ endpoint = fmt.Sprintf("%s/servicesNS/nobody/system/configs/conf-%s/%s", c.ManagementURI, fileName, property)
+ body = ""
+ for _, kv := range propertyKVList {
+ body += fmt.Sprintf("%s=%s&", kv[0], kv[1])
+ }
+ if len(body) > 0 && body[len(body)-1] == '&' {
+ body = body[:len(body)-1]
+ }
+
+ logger.DebugContext(ctx, "Updating conf file object", "fileName", fileName, "property", property)
+ request, err = http.NewRequest("POST", endpoint, strings.NewReader(body))
+ if err != nil {
+ logger.ErrorContext(ctx, "Failed to update conf file object", "fileName", fileName, "property", property, "error", err.Error())
+ return err
+ }
+
+ expectedStatus = []int{200, 201}
+ err = c.Do(request, expectedStatus, nil)
+ if err != nil {
+ logger.ErrorContext(ctx, fmt.Sprintf("Status not in %v for conf file object update", expectedStatus), "fileName", fileName, "property", property, "error", err.Error())
+ }
+ return err
+}
diff --git a/pkg/splunk/client/enterprise_test.go b/pkg/splunk/client/enterprise_test.go
index 9850b17c5..9a7322790 100644
--- a/pkg/splunk/client/enterprise_test.go
+++ b/pkg/splunk/client/enterprise_test.go
@@ -16,12 +16,15 @@
package client
import (
+ "bytes"
+ "context"
"fmt"
"net/http"
"net/url"
"strings"
"testing"
+ "github.com/splunk/splunk-operator/pkg/logging"
splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
spltest "github.com/splunk/splunk-operator/pkg/splunk/test"
@@ -642,6 +645,47 @@ func TestSetIdxcSecret(t *testing.T) {
splunkClientErrorTester(t, test)
}
+func TestSendTelemetry_Success(t *testing.T) {
+ path := "/services/telemetry/metrics"
+ bodyBytes := []byte(`{"metric":"value"}`)
+ wantRequest, _ := http.NewRequest("POST", "https://localhost:8089/services/telemetry/metrics", bytes.NewReader(bodyBytes))
+ wantRequest.Header.Set("Content-Type", "application/json")
+ wantResponse := TelemetryResponse{
+ Message: "Telemetry sent successfully",
+ MetricValueID: "abc123",
+ }
+ test := func(c SplunkClient) error {
+ resp, err := c.SendTelemetry(path, bodyBytes)
+ if err != nil {
+ return err
+ }
+ if resp.Message != wantResponse.Message || resp.MetricValueID != wantResponse.MetricValueID {
+ t.Errorf("SendTelemetry = %+v; want %+v", resp, wantResponse)
+ }
+ return nil
+ }
+ responseBody := `{"message":"Telemetry sent successfully","metricValueId":"abc123"}`
+ splunkClientTester(t, "TestSendTelemetry", 201, responseBody, wantRequest, test)
+}
+
+func TestSendTelemetry_Error(t *testing.T) {
+ path := "/services/telemetry/metrics"
+ bodyBytes := []byte(`{"metric":"value"}`)
+ wantRequest, _ := http.NewRequest("POST", "https://localhost:8089/services/telemetry/metrics", bytes.NewReader(bodyBytes))
+ wantRequest.Header.Set("Content-Type", "application/json")
+
+ test := func(c SplunkClient) error {
+ _, err := c.SendTelemetry(path, bodyBytes)
+ if err == nil {
+ t.Errorf("SendTelemetry should return error for 500 response code")
+ }
+ return nil
+ }
+
+ // Simulate a 500 error response from the mock client
+ splunkClientTester(t, "TestSendTelemetry_Error", 500, "", wantRequest, test)
+}
+
func TestRestartSplunk(t *testing.T) {
wantRequest, _ := http.NewRequest("POST", "https://localhost:8089/services/server/control/restart", nil)
test := func(c SplunkClient) error {
@@ -652,3 +696,54 @@ func TestRestartSplunk(t *testing.T) {
// Test invalid http request
splunkClientErrorTester(t, test)
}
+
+func TestUpdateConfFile(t *testing.T) {
+ // Test successful creation and update of conf property
+ property := "myproperty"
+ key := "mykey"
+ value := "myvalue"
+ fileName := "outputs"
+
+ ctx := context.TODO()
+ logger := logging.FromContext(ctx).With("func", "TestUpdateConfFile", "name", "test", "namespace", "test")
+
+ // First request: create the property (object) if it doesn't exist
+ createBody := strings.NewReader(fmt.Sprintf("name=%s", property))
+ wantCreateRequest, _ := http.NewRequest("POST", "https://localhost:8089/servicesNS/nobody/system/configs/conf-outputs", createBody)
+
+ // Second request: update the key/value for the property
+ updateBody := strings.NewReader(fmt.Sprintf("%s=%s", key, value))
+ wantUpdateRequest, _ := http.NewRequest("POST", fmt.Sprintf("https://localhost:8089/servicesNS/nobody/system/configs/conf-outputs/%s", property), updateBody)
+
+ mockSplunkClient := &spltest.MockHTTPClient{}
+ mockSplunkClient.AddHandler(wantCreateRequest, 201, "", nil)
+ mockSplunkClient.AddHandler(wantUpdateRequest, 200, "", nil)
+
+ c := NewSplunkClient("https://localhost:8089", "admin", "p@ssw0rd")
+ c.Client = mockSplunkClient
+
+ err := c.UpdateConfFile(ctx, logger, fileName, property, [][]string{{key, value}})
+ if err != nil {
+ t.Errorf("UpdateConfFile err = %v", err)
+ }
+ mockSplunkClient.CheckRequests(t, "TestUpdateConfFile")
+
+ // Negative test: error on create
+ mockSplunkClient = &spltest.MockHTTPClient{}
+ mockSplunkClient.AddHandler(wantCreateRequest, 500, "", nil)
+ c.Client = mockSplunkClient
+ err = c.UpdateConfFile(ctx, logger, fileName, property, [][]string{{key, value}})
+ if err == nil {
+ t.Errorf("UpdateConfFile expected error on create, got nil")
+ }
+
+ // Negative test: error on update
+ mockSplunkClient = &spltest.MockHTTPClient{}
+ mockSplunkClient.AddHandler(wantCreateRequest, 201, "", nil)
+ mockSplunkClient.AddHandler(wantUpdateRequest, 500, "", nil)
+ c.Client = mockSplunkClient
+ err = c.UpdateConfFile(ctx, logger, fileName, property, [][]string{{key, value}})
+ if err == nil {
+ t.Errorf("UpdateConfFile expected error on update, got nil")
+ }
+}
diff --git a/pkg/splunk/client/minioclient.go b/pkg/splunk/client/minioclient.go
index b7f27b52c..7d1269594 100644
--- a/pkg/splunk/client/minioclient.go
+++ b/pkg/splunk/client/minioclient.go
@@ -196,7 +196,10 @@ func (client *MinioClient) DownloadApp(ctx context.Context, downloadRequest Remo
options := minio.GetObjectOptions{}
// set the option to match the specified etag on remote storage
- options.SetMatchETag(downloadRequest.Etag)
+ if err = options.SetMatchETag(downloadRequest.Etag); err != nil {
+ scopedLog.Error(err, "Unable to set match etag")
+ return false, err
+ }
err = s3Client.FGetObject(ctx, client.BucketName, downloadRequest.RemoteFile, downloadRequest.LocalFile, options)
if err != nil {
diff --git a/pkg/splunk/client/names.go b/pkg/splunk/client/names.go
index f593c816d..1a55da7e5 100644
--- a/pkg/splunk/client/names.go
+++ b/pkg/splunk/client/names.go
@@ -3,47 +3,6 @@ package client
var invalidUrlByteArray = []byte{0x7F}
const (
- // Azure token fetch URL
- azureTokenFetchURL = "http://169.254.169.254/metadata/identity/oauth2/token"
-
- // Azure http header XMS version
- // https://docs.microsoft.com/en-us/rest/api/storageservices/versioning-for-the-azure-storage-services
- azureHTTPHeaderXmsVersion = "2021-08-06"
-
- // Azure Instance Metadata Service (IMDS) api-version parameter.
- // IMDS is versioned and specifying the API version in the HTTP request is mandatory.
- // https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service?tabs=linux
- azureIMDSApiVersion = "2021-10-01"
-
- // Azure URL for listing app packages
- // URL format is {azure_end_point}/{bucketName}?prefix=%s&restype=container&comp=list&include=snapshots&include=metadata"
- // For example : https://mystorageaccount.blob.core.windows.net/myappsbucket?prefix=standalone&restype=container&comp=list&include=snapshots&include=metadata
- azureBlobListAppFetchURL = "%s/%s?prefix=%s&restype=container&comp=list&include=snapshots&include=metadata"
-
- // Azure URL for downloading an app package
- // URL format is {azure_end_point}/{bucketName}/{pathToAppPackage}
- // For example : https://mystorageaccount.blob.core.windows.net/myappsbucket/standlone/myappsteamapp.tgz
- azureBlobDownloadAppFetchURL = "%s/%s/%s"
-
- // Header strings
- headerAuthorization = "Authorization"
- headerCacheControl = "Cache-Control"
- headerContentEncoding = "Content-Encoding"
- headerContentDisposition = "Content-Disposition"
- headerContentLanguage = "Content-Language"
- headerContentLength = "Content-Length"
- headerContentMD5 = "Content-MD5"
- headerContentType = "Content-Type"
- headerDate = "Date"
- headerIfMatch = "If-Match"
- headerIfModifiedSince = "If-Modified-Since"
- headerIfNoneMatch = "If-None-Match"
- headerIfUnmodifiedSince = "If-Unmodified-Since"
- headerRange = "Range"
- headerUserAgent = "User-Agent"
- headerXmsDate = "x-ms-date"
- headerXmsVersion = "x-ms-version"
-
awsRegionEndPointDelimiter = "|"
// Timeout for http clients used with appFramework
diff --git a/pkg/splunk/client/util.go b/pkg/splunk/client/util.go
index c8cadb58c..9ceb5edda 100644
--- a/pkg/splunk/client/util.go
+++ b/pkg/splunk/client/util.go
@@ -130,7 +130,7 @@ func CheckIfVolumeExists(volumeList []enterpriseApi.VolumeSpec, volName string)
return -1, fmt.Errorf("volume: %s, doesn't exist", volName)
}
-// GetAppSrcVolume gets the volume defintion for an app source
+// GetAppSrcVolume gets the volume definition for an app source
func GetAppSrcVolume(ctx context.Context, appSource enterpriseApi.AppSourceSpec, appFrameworkRef *enterpriseApi.AppFrameworkSpec) (enterpriseApi.VolumeSpec, error) {
var volName string
var index int
diff --git a/pkg/splunk/client/util_test.go b/pkg/splunk/client/util_test.go
index 1c3ba5d47..44a94aedc 100644
--- a/pkg/splunk/client/util_test.go
+++ b/pkg/splunk/client/util_test.go
@@ -17,10 +17,11 @@ package client
import (
"context"
- enterpriseApi "github.com/splunk/splunk-operator/api/v4"
"reflect"
"testing"
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+
spltest "github.com/splunk/splunk-operator/pkg/splunk/test"
)
diff --git a/pkg/splunk/common/doc.go b/pkg/splunk/common/doc.go
index bfeedd7f8..854dbf816 100644
--- a/pkg/splunk/common/doc.go
+++ b/pkg/splunk/common/doc.go
@@ -15,6 +15,6 @@
/*
Package common contains generic methods that are reused across other packages.
-This package has no depedencies other than the standard go and kubernetes libraries.
+This package has no dependencies other than the standard go and kubernetes libraries.
*/
package common
diff --git a/pkg/splunk/common/names.go b/pkg/splunk/common/names.go
index 32e892b96..b0f7e94ca 100644
--- a/pkg/splunk/common/names.go
+++ b/pkg/splunk/common/names.go
@@ -117,6 +117,7 @@ const (
var AppDownloadVolume string = "/opt/splunk/appframework/"
var EventPublisherKey contextKey = "eventPublisher"
+var EventRecorderKey contextKey = "eventRecorder"
// GetVersionedSecretName returns a versioned secret name
func GetVersionedSecretName(versionedSecretIdentifier string, version string) string {
diff --git a/pkg/splunk/enterprise/afwscheduler.go b/pkg/splunk/enterprise/afwscheduler.go
index 2dd2fd667..17cfb0ce4 100644
--- a/pkg/splunk/enterprise/afwscheduler.go
+++ b/pkg/splunk/enterprise/afwscheduler.go
@@ -35,6 +35,11 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
)
+var (
+ phaseManagerBusyWaitDuration = 1 * time.Second
+ phaseManagerLoopSleepDuration = 200 * time.Millisecond
+)
+
var appPhaseInfoStatuses = map[enterpriseApi.AppPhaseStatusType]bool{
enterpriseApi.AppPkgDownloadPending: true,
enterpriseApi.AppPkgDownloadInProgress: true,
@@ -55,7 +60,7 @@ var appPhaseInfoStatuses = map[enterpriseApi.AppPhaseStatusType]bool{
// isFanOutApplicableToCR confirms if a given CR needs fanOut support
func isFanOutApplicableToCR(cr splcommon.MetaObject) bool {
switch cr.GetObjectKind().GroupVersionKind().Kind {
- case "Standalone":
+ case "Standalone", "IngestorCluster":
return true
default:
return false
@@ -106,6 +111,8 @@ func getApplicablePodNameForAppFramework(cr splcommon.MetaObject, ordinalIdx int
podType = "cluster-manager"
case "MonitoringConsole":
podType = "monitoring-console"
+ case "IngestorCluster":
+ podType = "ingestor"
}
return fmt.Sprintf("splunk-%s-%s-%d", cr.GetName(), podType, ordinalIdx)
@@ -153,6 +160,8 @@ func getTelAppNameExtension(crKind string) (string, error) {
return "cmaster", nil
case "ClusterManager":
return "cmanager", nil
+ case "IngestorCluster":
+ return "ingestor", nil
default:
return "", errors.New("Invalid CR kind for telemetry app")
}
@@ -170,26 +179,20 @@ var addTelApp = func(ctx context.Context, podExecClient splutil.PodExecClientImp
// Create pod exec client
crKind := cr.GetObjectKind().GroupVersionKind().Kind
- // Get Tel App Name Extension
- appNameExt, err := getTelAppNameExtension(crKind)
- if err != nil {
- return err
- }
-
// Commands to run on pods
var command1, command2 string
// Handle non SHC scenarios(Standalone, CM, LM)
if crKind != "SearchHeadCluster" {
// Create dir on pods
- command1 = fmt.Sprintf(createTelAppNonShcString, appNameExt, appNameExt, telAppConfString, appNameExt, telAppDefMetaConfString, appNameExt)
+ command1 = fmt.Sprintf(createTelAppNonShcString, telAppConfString, telAppDefMetaConfString)
// App reload
command2 = telAppReloadString
} else {
// Create dir on pods
- command1 = fmt.Sprintf(createTelAppShcString, shcAppsLocationOnDeployer, appNameExt, shcAppsLocationOnDeployer, appNameExt, telAppConfString, shcAppsLocationOnDeployer, appNameExt, telAppDefMetaConfString, shcAppsLocationOnDeployer, appNameExt)
+ command1 = fmt.Sprintf(createTelAppShcString, shcAppsLocationOnDeployer, shcAppsLocationOnDeployer, telAppConfString, shcAppsLocationOnDeployer, telAppDefMetaConfString, shcAppsLocationOnDeployer)
// Bundle push
command2 = fmt.Sprintf(applySHCBundleCmdStr, GetSplunkStatefulsetURL(cr.GetNamespace(), SplunkSearchHead, cr.GetName(), 0, false), "/tmp/status.txt")
@@ -509,7 +512,7 @@ func (downloadWorker *PipelineWorker) download(ctx context.Context, pplnPhase *P
return
}
- // download is successfull, update the state and reset the retry count
+ // download is successful, update the state and reset the retry count
updatePplnWorkerPhaseInfo(ctx, appDeployInfo, 0, enterpriseApi.AppPkgDownloadComplete)
scopedLog.Info("Finished downloading app")
@@ -597,10 +600,10 @@ downloadWork:
default:
// All the workers are busy, check after one second
scopedLog.Info("All the workers are busy, we will check again after one second")
- time.Sleep(1 * time.Second)
+ time.Sleep(phaseManagerBusyWaitDuration)
}
- time.Sleep(200 * time.Millisecond)
+ time.Sleep(phaseManagerLoopSleepDuration)
}
// wait for all the download threads to finish
@@ -680,7 +683,7 @@ downloadPhase:
}
}
- time.Sleep(200 * time.Millisecond)
+ time.Sleep(phaseManagerLoopSleepDuration)
}
}
@@ -1002,7 +1005,11 @@ func runPodCopyWorker(ctx context.Context, worker *PipelineWorker, ch chan struc
}
// get the podExecClient to be used for copying file to pod
- podExecClient := splutil.GetPodExecClient(worker.client, cr, worker.targetPodName)
+ // Use injected client if available (for testing), otherwise create real client
+ podExecClient := worker.podExecClient
+ if podExecClient == nil {
+ podExecClient = splutil.GetPodExecClient(worker.client, cr, worker.targetPodName)
+ }
stdOut, stdErr, err := CopyFileToPod(ctx, worker.client, cr.GetNamespace(), appPkgLocalPath, appPkgPathOnPod, podExecClient)
if err != nil {
phaseInfo.FailCount++
@@ -1062,10 +1069,10 @@ podCopyHandler:
}
default:
// All the workers are busy, check after one second
- time.Sleep(1 * time.Second)
+ time.Sleep(phaseManagerBusyWaitDuration)
}
- time.Sleep(200 * time.Millisecond)
+ time.Sleep(phaseManagerLoopSleepDuration)
}
// Wait for all the workers to finish
@@ -1131,7 +1138,7 @@ podCopyPhase:
}
}
- time.Sleep(200 * time.Millisecond)
+ time.Sleep(phaseManagerLoopSleepDuration)
}
}
@@ -1231,9 +1238,12 @@ installHandler:
// Install workers can exist for local scope and premium app scopes
if installWorker != nil {
- podExecClient := splutil.GetPodExecClient(installWorker.client, installWorker.cr, installWorker.targetPodName)
+ // Use injected client if available (for testing), otherwise create real client
+ podExecClient := installWorker.podExecClient
+ if podExecClient == nil {
+ podExecClient = splutil.GetPodExecClient(installWorker.client, installWorker.cr, installWorker.targetPodName)
+ }
podID, _ := getOrdinalValFromPodName(installWorker.targetPodName)
-
// Get app source spec
appSrcSpec, err := getAppSrcSpec(installWorker.afwConfig.AppSources, installWorker.appSrcName)
if err != nil {
@@ -1264,10 +1274,10 @@ installHandler:
}
default:
- time.Sleep(1 * time.Second)
+ time.Sleep(phaseManagerBusyWaitDuration)
}
- time.Sleep(200 * time.Millisecond)
+ time.Sleep(phaseManagerLoopSleepDuration)
}
for {
@@ -1287,7 +1297,7 @@ installHandler:
}
// Sleep for a second before retry
- time.Sleep(1 * time.Second)
+ time.Sleep(phaseManagerBusyWaitDuration)
}
// Wait for all the workers to finish
@@ -1383,7 +1393,7 @@ installPhase:
}
}
- time.Sleep(200 * time.Millisecond)
+ time.Sleep(phaseManagerLoopSleepDuration)
}
}
@@ -1549,6 +1559,8 @@ func afwGetReleventStatefulsetByKind(ctx context.Context, cr splcommon.MetaObjec
instanceID = SplunkClusterManager
case "MonitoringConsole":
instanceID = SplunkMonitoringConsole
+ case "IngestorCluster":
+ instanceID = SplunkIngestor
default:
return nil
}
@@ -1671,7 +1683,7 @@ func (shcPlaybookContext *SHCPlaybookContext) isBundlePushComplete(ctx context.C
// remove the status file too, so that we dont have any stale status
removeErr := shcPlaybookContext.removeSHCBundlePushStatusFile(ctx)
if removeErr != nil {
- errors.Wrap(err, removeErr.Error())
+ err = errors.Wrap(err, removeErr.Error())
}
return false, err
}
@@ -1693,7 +1705,7 @@ func (shcPlaybookContext *SHCPlaybookContext) isBundlePushComplete(ctx context.C
// remove the status file too, so that we dont have any stale status
removeErr := shcPlaybookContext.removeSHCBundlePushStatusFile(ctx)
if removeErr != nil {
- errors.Wrap(err, removeErr.Error())
+ err = errors.Wrap(err, removeErr.Error())
}
return false, err
}
@@ -2174,8 +2186,8 @@ func afwSchedulerEntry(ctx context.Context, client splcommon.ControllerClient, c
scopedLog := reqLogger.WithName("afwSchedulerEntry").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
// return error, if there is no storage defined for the Operator pod
- if !isPersistantVolConfigured() {
- return true, fmt.Errorf("persistant volume required for the App framework, but not provisioned")
+ if !isPersistentVolConfigured() {
+ return true, fmt.Errorf("persistent volume required for the App framework, but not provisioned")
}
// Operator pod storage is not fully under operator control
@@ -2210,6 +2222,7 @@ func afwSchedulerEntry(ctx context.Context, client splcommon.ControllerClient, c
podExecClient := splutil.GetPodExecClient(client, cr, podName)
appsPathOnPod := filepath.Join(appBktMnt, appSrcName)
+
// create the dir on Splunk pod/s where app/s will be copied from operator pod
err = createDirOnSplunkPods(ctx, cr, *sts.Spec.Replicas, appsPathOnPod, podExecClient)
if err != nil {
diff --git a/pkg/splunk/enterprise/afwscheduler_test.go b/pkg/splunk/enterprise/afwscheduler_test.go
index 38668da69..53d8eeef1 100644
--- a/pkg/splunk/enterprise/afwscheduler_test.go
+++ b/pkg/splunk/enterprise/afwscheduler_test.go
@@ -377,6 +377,13 @@ func TestGetApplicablePodNameForAppFramework(t *testing.T) {
if expectedPodName != returnedPodName {
t.Errorf("Unable to fetch correct pod name. Expected %s, returned %s", expectedPodName, returnedPodName)
}
+
+ cr.TypeMeta.Kind = "IngestorCluster"
+ expectedPodName = "splunk-stack1-ingestor-0"
+ returnedPodName = getApplicablePodNameForAppFramework(&cr, podID)
+ if expectedPodName != returnedPodName {
+ t.Errorf("Unable to fetch correct pod name. Expected %s, returned %s", expectedPodName, returnedPodName)
+ }
}
func TestInitAppInstallPipeline(t *testing.T) {
@@ -713,6 +720,16 @@ func TestPhaseManagersTermination(t *testing.T) {
}
func TestPhaseManagersMsgChannels(t *testing.T) {
+ // Override timing variables for faster test execution
+ origBusyWait := phaseManagerBusyWaitDuration
+ origLoopSleep := phaseManagerLoopSleepDuration
+ phaseManagerBusyWaitDuration = 1 * time.Millisecond
+ phaseManagerLoopSleepDuration = 1 * time.Millisecond
+ defer func() {
+ phaseManagerBusyWaitDuration = origBusyWait
+ phaseManagerLoopSleepDuration = origLoopSleep
+ }()
+
ctx := context.TODO()
appDeployContext := &enterpriseApi.AppDeploymentContext{
AppsStatusMaxConcurrentAppDownloads: 1,
@@ -789,6 +806,18 @@ func TestPhaseManagersMsgChannels(t *testing.T) {
t.Errorf("unable to apply statefulset")
}
+ // Create mock PodExecClient for all workers
+ mockClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: &cr,
+ TargetPodName: "splunk-stack1-standalone-0",
+ }
+ mockClient.AddMockPodExecReturnContext(ctx, "", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "",
+ Err: nil,
+ })
+
// Just make the lint conversion checks happy
capacity := 1
var workerList []*PipelineWorker = make([]*PipelineWorker, capacity)
@@ -806,9 +835,10 @@ func TestPhaseManagersMsgChannels(t *testing.T) {
FailCount: 2,
},
},
- afwConfig: &cr.Spec.AppFrameworkConfig,
- client: client,
- fanOut: cr.GetObjectKind().GroupVersionKind().Kind == "Standalone",
+ afwConfig: &cr.Spec.AppFrameworkConfig,
+ client: client,
+ fanOut: cr.GetObjectKind().GroupVersionKind().Kind == "Standalone",
+ podExecClient: mockClient,
}
}
@@ -832,7 +862,7 @@ func TestPhaseManagersMsgChannels(t *testing.T) {
}
worker.appDeployInfo.PhaseInfo.FailCount = 4
// Let the phase hop on empty channel, to get more coverage
- time.Sleep(600 * time.Millisecond)
+ time.Sleep(10 * time.Millisecond)
ppln.pplnPhases[enterpriseApi.PhaseDownload].q = nil
// add the worker to the pod copy phase
@@ -859,7 +889,7 @@ func TestPhaseManagersMsgChannels(t *testing.T) {
}
worker.appDeployInfo.PhaseInfo.FailCount = 4
// Let the phase hop on empty channel, to get more coverage
- time.Sleep(600 * time.Millisecond)
+ time.Sleep(10 * time.Millisecond)
ppln.pplnPhases[enterpriseApi.PhasePodCopy].q = nil
// add the worker to the install phase
@@ -879,7 +909,7 @@ func TestPhaseManagersMsgChannels(t *testing.T) {
worker.appDeployInfo.PhaseInfo.FailCount = 4
// Let the phase hop on empty channel, to get more coverage
- time.Sleep(600 * time.Millisecond)
+ time.Sleep(10 * time.Millisecond)
close(ppln.sigTerm)
@@ -1346,7 +1376,7 @@ func TestAfwGetReleventStatefulsetByKind(t *testing.T) {
_, _ = splctrl.ApplyStatefulSet(ctx, c, ¤t)
if afwGetReleventStatefulsetByKind(ctx, &cr, c) == nil {
- t.Errorf("Unable to get the sts for SHC deployer")
+ t.Errorf("Unable to get the sts for LicenseManager")
}
// Test if STS works for Standalone
@@ -1360,7 +1390,21 @@ func TestAfwGetReleventStatefulsetByKind(t *testing.T) {
_, _ = splctrl.ApplyStatefulSet(ctx, c, ¤t)
if afwGetReleventStatefulsetByKind(ctx, &cr, c) == nil {
- t.Errorf("Unable to get the sts for SHC deployer")
+ t.Errorf("Unable to get the sts for Standalone")
+ }
+
+ // Test if STS works for IngestorCluster
+ cr.TypeMeta.Kind = "IngestorCluster"
+ current = appsv1.StatefulSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "splunk-stack1-ingestor",
+ Namespace: "test",
+ },
+ }
+
+ _, _ = splctrl.ApplyStatefulSet(ctx, c, ¤t)
+ if afwGetReleventStatefulsetByKind(ctx, &cr, c) == nil {
+ t.Errorf("Unable to get the sts for IngestorCluster")
}
// Negative testing
@@ -2127,7 +2171,10 @@ func TestExtractClusterScopedAppOnPod(t *testing.T) {
}
func TestRunPodCopyWorker(t *testing.T) {
- ctx := context.TODO()
+ // Use context with timeout to prevent workers from hanging indefinitely
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
cr := enterpriseApi.ClusterManager{
TypeMeta: metav1.TypeMeta{
Kind: "ClusterManager",
@@ -2189,6 +2236,21 @@ func TestRunPodCopyWorker(t *testing.T) {
var client splcommon.ControllerClient = getConvertedClient(c)
var waiter sync.WaitGroup
+ // Create MockPodExecClient to avoid real network I/O
+ mockPodExecClient := &spltest.MockPodExecClient{
+ Client: c,
+ Cr: &cr,
+ TargetPodName: "splunk-stack1-clustermanager-0",
+ }
+
+ // Setup mock responses for CopyFileToPod operations
+ dirCheckCmd := fmt.Sprintf("test -d %s; echo -n $?", "/operator-staging/appframework/adminApps")
+ mockPodExecClient.AddMockPodExecReturnContext(ctx, dirCheckCmd, &spltest.MockPodExecReturnContext{
+ StdOut: "0",
+ StdErr: "",
+ Err: nil,
+ })
+
worker := &PipelineWorker{
cr: &cr,
targetPodName: "splunk-stack1-clustermanager-0",
@@ -2201,10 +2263,11 @@ func TestRunPodCopyWorker(t *testing.T) {
},
ObjectHash: "abcd1234abcd",
},
- client: client,
- afwConfig: appFrameworkConfig,
- waiter: &waiter,
- appSrcName: appFrameworkConfig.AppSources[0].Name,
+ client: client,
+ afwConfig: appFrameworkConfig,
+ waiter: &waiter,
+ appSrcName: appFrameworkConfig.AppSources[0].Name,
+ podExecClient: mockPodExecClient, // Inject the mock to avoid real network I/O
}
var ch chan struct{} = make(chan struct{}, 1)
@@ -2252,7 +2315,19 @@ func TestRunPodCopyWorker(t *testing.T) {
}
func TestPodCopyWorkerHandler(t *testing.T) {
- ctx := context.TODO()
+ // Override timing variables for faster test execution
+ origBusyWait := phaseManagerBusyWaitDuration
+ origLoopSleep := phaseManagerLoopSleepDuration
+ phaseManagerBusyWaitDuration = 1 * time.Millisecond
+ phaseManagerLoopSleepDuration = 1 * time.Millisecond
+ defer func() {
+ phaseManagerBusyWaitDuration = origBusyWait
+ phaseManagerLoopSleepDuration = origLoopSleep
+ }()
+
+ // Use context with timeout to prevent workers from hanging indefinitely
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
cr := enterpriseApi.ClusterManager{
TypeMeta: metav1.TypeMeta{
Kind: "ClusterManager",
@@ -2310,6 +2385,28 @@ func TestPodCopyWorkerHandler(t *testing.T) {
// Add object
client.AddObject(pod)
+ // Create MockPodExecClient to avoid real network I/O
+ mockPodExecClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: &cr,
+ TargetPodName: "splunk-stack1-clustermanager-0",
+ }
+
+ // Setup mock responses for CopyFileToPod operations
+ // CopyFileToPod makes 2 exec calls:
+ // 1. Directory existence check: "test -d <dir>; echo -n $?"
+ // 2. Tar extraction: ["tar", "-xf", "-", "-C", "<dir>"]
+
+ // Mock response for directory check (should return "0" for success)
+ dirCheckCmd := fmt.Sprintf("test -d %s; echo -n $?", "/operator-staging/appframework/adminApps")
+ mockPodExecClient.AddMockPodExecReturnContext(ctx, dirCheckCmd, &spltest.MockPodExecReturnContext{
+ StdOut: "0",
+ StdErr: "",
+ Err: nil,
+ })
+
+ // Note: tar command will be handled by the default case in MockPodExecClient (returns empty with nil error)
+
worker := &PipelineWorker{
cr: &cr,
targetPodName: "splunk-stack1-clustermanager-0",
@@ -2322,9 +2419,10 @@ func TestPodCopyWorkerHandler(t *testing.T) {
},
ObjectHash: "abcd1234abcd",
},
- client: client,
- afwConfig: appFrameworkConfig,
- appSrcName: appFrameworkConfig.AppSources[0].Name,
+ client: client,
+ afwConfig: appFrameworkConfig,
+ appSrcName: appFrameworkConfig.AppSources[0].Name,
+ podExecClient: mockPodExecClient, // Inject the mock to avoid real network I/O
}
defaultVol := splcommon.AppDownloadVolume
@@ -2371,7 +2469,7 @@ func TestPodCopyWorkerHandler(t *testing.T) {
ppln.pplnPhases[enterpriseApi.PhaseInstall].msgChannel <- worker
- time.Sleep(2 * time.Second)
+ time.Sleep(10 * time.Millisecond)
// sending null worker should not cause a crash
ppln.pplnPhases[enterpriseApi.PhaseInstall].msgChannel <- nil
@@ -2382,7 +2480,7 @@ func TestPodCopyWorkerHandler(t *testing.T) {
}
// wait for the handler to consue the worker
- time.Sleep(2 * time.Second)
+ time.Sleep(10 * time.Millisecond)
// Closing the channels should exit podCopyWorkerHandler test cleanly
close(ppln.pplnPhases[enterpriseApi.PhaseInstall].msgChannel)
@@ -3871,6 +3969,16 @@ func TestHandleAppPkgInstallComplete(t *testing.T) {
}
func TestInstallWorkerHandler(t *testing.T) {
+ // Override timing variables for faster test execution
+ origBusyWait := phaseManagerBusyWaitDuration
+ origLoopSleep := phaseManagerLoopSleepDuration
+ phaseManagerBusyWaitDuration = 1 * time.Millisecond
+ phaseManagerLoopSleepDuration = 1 * time.Millisecond
+ defer func() {
+ phaseManagerBusyWaitDuration = origBusyWait
+ phaseManagerLoopSleepDuration = origLoopSleep
+ }()
+
ctx := context.TODO()
cr := enterpriseApi.ClusterManager{
TypeMeta: metav1.TypeMeta{
@@ -3948,6 +4056,18 @@ func TestInstallWorkerHandler(t *testing.T) {
t.Errorf("unable to apply statefulset")
}
+ // Create mock PodExecClient to avoid real pod command execution
+ mockClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: &cr,
+ TargetPodName: "splunk-stack1-clustermanager-0",
+ }
+ mockClient.AddMockPodExecReturnContext(ctx, "", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "",
+ Err: nil,
+ })
+
worker := &PipelineWorker{
cr: &cr,
targetPodName: "splunk-stack1-clustermanager-0",
@@ -3960,10 +4080,11 @@ func TestInstallWorkerHandler(t *testing.T) {
},
ObjectHash: "abcd1234abcd",
},
- client: client,
- afwConfig: appFrameworkConfig,
- sts: sts,
- appSrcName: appFrameworkConfig.AppSources[0].Name,
+ client: client,
+ afwConfig: appFrameworkConfig,
+ sts: sts,
+ appSrcName: appFrameworkConfig.AppSources[0].Name,
+ podExecClient: mockClient,
}
var appDeployContext *enterpriseApi.AppDeploymentContext = &enterpriseApi.AppDeploymentContext{
@@ -4245,6 +4366,7 @@ func TestGetTelAppNameExtension(t *testing.T) {
"SearchHeadCluster": "shc",
"ClusterMaster": "cmaster",
"ClusterManager": "cmanager",
+ "IngestorCluster": "ingestor",
}
// Test all CR kinds
@@ -4280,7 +4402,7 @@ func TestAddTelAppCMaster(t *testing.T) {
// Define mock podexec context
podExecCommands := []string{
- fmt.Sprintf(createTelAppNonShcString, "cmaster", "cmaster", telAppConfString, "cmaster", telAppDefMetaConfString, "cmaster"),
+ fmt.Sprintf(createTelAppNonShcString, telAppConfString, telAppDefMetaConfString),
telAppReloadString,
}
@@ -4304,7 +4426,7 @@ func TestAddTelAppCMaster(t *testing.T) {
// Test shc
podExecCommands = []string{
- fmt.Sprintf(createTelAppShcString, shcAppsLocationOnDeployer, "shc", shcAppsLocationOnDeployer, "shc", telAppConfString, shcAppsLocationOnDeployer, "shc", telAppDefMetaConfString, shcAppsLocationOnDeployer, "shc"),
+ fmt.Sprintf(createTelAppShcString, shcAppsLocationOnDeployer, shcAppsLocationOnDeployer, telAppConfString, shcAppsLocationOnDeployer, telAppDefMetaConfString, shcAppsLocationOnDeployer),
fmt.Sprintf(applySHCBundleCmdStr, GetSplunkStatefulsetURL(shcCr.GetNamespace(), SplunkSearchHead, shcCr.GetName(), 0, false), "/tmp/status.txt"),
}
@@ -4320,7 +4442,7 @@ func TestAddTelAppCMaster(t *testing.T) {
// Test non-shc error 1
podExecCommandsError := []string{
- fmt.Sprintf(createTelAppNonShcString, "cmerror", "cmerror", telAppConfString, "cmerror", telAppDefMetaConfString, "cmerror"),
+ fmt.Sprintf(createTelAppNonShcString, telAppConfString, telAppDefMetaConfString),
}
mockPodExecReturnContextsError := []*spltest.MockPodExecReturnContext{
@@ -4339,7 +4461,7 @@ func TestAddTelAppCMaster(t *testing.T) {
// Test non-shc error 2
podExecCommandsError = []string{
- fmt.Sprintf(createTelAppNonShcString, "cm", "cm", telAppConfString, "cm", telAppDefMetaConfString, "cm"),
+ fmt.Sprintf(createTelAppNonShcString, telAppConfString, telAppDefMetaConfString),
}
var mockPodExecClientError2 *spltest.MockPodExecClient = &spltest.MockPodExecClient{Cr: cmCr}
mockPodExecClientError2.AddMockPodExecReturnContexts(ctx, podExecCommandsError, mockPodExecReturnContextsError...)
@@ -4351,7 +4473,7 @@ func TestAddTelAppCMaster(t *testing.T) {
// Test shc error 1
podExecCommandsError = []string{
- fmt.Sprintf(createTelAppShcString, shcAppsLocationOnDeployer, "shcerror", shcAppsLocationOnDeployer, "shcerror", telAppConfString, shcAppsLocationOnDeployer, "shcerror", telAppDefMetaConfString, shcAppsLocationOnDeployer, "shcerror"),
+ fmt.Sprintf(createTelAppShcString, shcAppsLocationOnDeployer, shcAppsLocationOnDeployer, telAppConfString, shcAppsLocationOnDeployer, telAppDefMetaConfString, shcAppsLocationOnDeployer),
}
var mockPodExecClientError3 *spltest.MockPodExecClient = &spltest.MockPodExecClient{Cr: shcCr}
@@ -4364,7 +4486,7 @@ func TestAddTelAppCMaster(t *testing.T) {
// Test shc error 2
podExecCommandsError = []string{
- fmt.Sprintf(createTelAppShcString, shcAppsLocationOnDeployer, "shc", shcAppsLocationOnDeployer, "shc", telAppConfString, shcAppsLocationOnDeployer, "shc", telAppDefMetaConfString, shcAppsLocationOnDeployer, "shc"),
+ fmt.Sprintf(createTelAppShcString, shcAppsLocationOnDeployer, shcAppsLocationOnDeployer, telAppConfString, shcAppsLocationOnDeployer, telAppDefMetaConfString, shcAppsLocationOnDeployer),
}
var mockPodExecClientError4 *spltest.MockPodExecClient = &spltest.MockPodExecClient{Cr: shcCr}
mockPodExecClientError4.AddMockPodExecReturnContexts(ctx, podExecCommandsError, mockPodExecReturnContextsError...)
@@ -4393,7 +4515,7 @@ func TestAddTelAppCManager(t *testing.T) {
// Define mock podexec context
podExecCommands := []string{
- fmt.Sprintf(createTelAppNonShcString, "cmanager", "cmanager", telAppConfString, "cmanager", telAppDefMetaConfString, "cmanager"),
+ fmt.Sprintf(createTelAppNonShcString, telAppConfString, telAppDefMetaConfString),
telAppReloadString,
}
@@ -4417,7 +4539,7 @@ func TestAddTelAppCManager(t *testing.T) {
// Test shc
podExecCommands = []string{
- fmt.Sprintf(createTelAppShcString, shcAppsLocationOnDeployer, "shc", shcAppsLocationOnDeployer, "shc", telAppConfString, shcAppsLocationOnDeployer, "shc", telAppDefMetaConfString, shcAppsLocationOnDeployer, "shc"),
+ fmt.Sprintf(createTelAppShcString, shcAppsLocationOnDeployer, shcAppsLocationOnDeployer, telAppConfString, shcAppsLocationOnDeployer, telAppDefMetaConfString, shcAppsLocationOnDeployer),
fmt.Sprintf(applySHCBundleCmdStr, GetSplunkStatefulsetURL(shcCr.GetNamespace(), SplunkSearchHead, shcCr.GetName(), 0, false), "/tmp/status.txt"),
}
@@ -4433,7 +4555,7 @@ func TestAddTelAppCManager(t *testing.T) {
// Test non-shc error 1
podExecCommandsError := []string{
- fmt.Sprintf(createTelAppNonShcString, "cmerror", "cmerror", telAppConfString, "cmerror", telAppDefMetaConfString, "cmerror"),
+ fmt.Sprintf(createTelAppNonShcString, telAppConfString, telAppDefMetaConfString),
}
mockPodExecReturnContextsError := []*spltest.MockPodExecReturnContext{
@@ -4452,7 +4574,7 @@ func TestAddTelAppCManager(t *testing.T) {
// Test non-shc error 2
podExecCommandsError = []string{
- fmt.Sprintf(createTelAppNonShcString, "cm", "cm", telAppConfString, "cm", telAppDefMetaConfString, "cm"),
+ fmt.Sprintf(createTelAppNonShcString, telAppConfString, telAppDefMetaConfString),
}
var mockPodExecClientError2 *spltest.MockPodExecClient = &spltest.MockPodExecClient{Cr: cmCr}
mockPodExecClientError2.AddMockPodExecReturnContexts(ctx, podExecCommandsError, mockPodExecReturnContextsError...)
@@ -4464,7 +4586,7 @@ func TestAddTelAppCManager(t *testing.T) {
// Test shc error 1
podExecCommandsError = []string{
- fmt.Sprintf(createTelAppShcString, shcAppsLocationOnDeployer, "shcerror", shcAppsLocationOnDeployer, "shcerror", telAppConfString, shcAppsLocationOnDeployer, "shcerror", telAppDefMetaConfString, shcAppsLocationOnDeployer, "shcerror"),
+ fmt.Sprintf(createTelAppShcString, shcAppsLocationOnDeployer, shcAppsLocationOnDeployer, telAppConfString, shcAppsLocationOnDeployer, telAppDefMetaConfString, shcAppsLocationOnDeployer),
}
var mockPodExecClientError3 *spltest.MockPodExecClient = &spltest.MockPodExecClient{Cr: shcCr}
@@ -4477,7 +4599,7 @@ func TestAddTelAppCManager(t *testing.T) {
// Test shc error 2
podExecCommandsError = []string{
- fmt.Sprintf(createTelAppShcString, shcAppsLocationOnDeployer, "shc", shcAppsLocationOnDeployer, "shc", telAppConfString, shcAppsLocationOnDeployer, "shc", telAppDefMetaConfString, shcAppsLocationOnDeployer, "shc"),
+ fmt.Sprintf(createTelAppShcString, shcAppsLocationOnDeployer, shcAppsLocationOnDeployer, telAppConfString, shcAppsLocationOnDeployer, telAppDefMetaConfString, shcAppsLocationOnDeployer),
}
var mockPodExecClientError4 *spltest.MockPodExecClient = &spltest.MockPodExecClient{Cr: shcCr}
mockPodExecClientError4.AddMockPodExecReturnContexts(ctx, podExecCommandsError, mockPodExecReturnContextsError...)
diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go
index b7a54910c..bab4440a9 100644
--- a/pkg/splunk/enterprise/clustermanager.go
+++ b/pkg/splunk/enterprise/clustermanager.go
@@ -22,7 +22,6 @@ import (
"time"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
- "sigs.k8s.io/controller-runtime/pkg/client"
rclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/go-logr/logr"
@@ -40,24 +39,24 @@ import (
)
// ApplyClusterManager reconciles the state of a Splunk Enterprise cluster manager.
-func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (reconcile.Result, error) {
-
+// podExecClient parameter is optional - if nil, a real PodExecClient will be created.
+// This allows tests to inject a mock client.
+func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) (result reconcile.Result, err error) {
// unless modified, reconcile for this object will be requeued after 5 seconds
- result := reconcile.Result{
+ result = reconcile.Result{
Requeue: true,
RequeueAfter: time.Second * 5,
}
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("ApplyClusterManager")
- eventPublisher, _ := newK8EventPublisher(client, cr)
+
+ eventPublisher := GetEventPublisher(ctx, cr)
ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
cr.Kind = "ClusterManager"
if cr.Status.ResourceRevMap == nil {
cr.Status.ResourceRevMap = make(map[string]string)
}
-
- var err error
// Initialize phase
cr.Status.Phase = enterpriseApi.PhaseError
@@ -226,8 +225,10 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient,
scopedLog.Error(err, "Error in deleting automated monitoring console resource")
}
- // Create podExecClient
- podExecClient := splutil.GetPodExecClient(client, cr, "")
+ // Create podExecClient (use injected one if provided, otherwise create real one)
+ if podExecClient == nil {
+ podExecClient = splutil.GetPodExecClient(client, cr, "")
+ }
// Add a splunk operator telemetry app
if cr.Spec.EtcVolumeStorageConfig.EphemeralStorage || !cr.Status.TelAppInstalled {
@@ -242,7 +243,7 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient,
// Manager apps bundle push requires multiple reconcile iterations in order to reflect the configMap on the CM pod.
// So keep PerformCmBundlePush() as the last call in this block of code, so that other functionalities are not blocked
- err = PerformCmBundlePush(ctx, client, cr)
+ err = PerformCmBundlePush(ctx, client, cr, podExecClient)
if err != nil {
return result, err
}
@@ -322,7 +323,8 @@ func getClusterManagerStatefulSet(ctx context.Context, client splcommon.Controll
func CheckIfsmartstoreConfigMapUpdatedToPod(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) error {
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("CheckIfsmartstoreConfigMapUpdatedToPod").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
- eventPublisher, _ := newK8EventPublisher(c, cr)
+
+ eventPublisher := GetEventPublisher(ctx, cr)
command := fmt.Sprintf("cat /mnt/splunk-operator/local/%s", configToken)
streamOptions := splutil.NewStreamOptionsObject(command)
@@ -349,8 +351,9 @@ func CheckIfsmartstoreConfigMapUpdatedToPod(ctx context.Context, c splcommon.Con
return fmt.Errorf("smartstore ConfigMap is missing")
}
-// PerformCmBundlePush initiates the bundle push from cluster manager
-func PerformCmBundlePush(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) error {
+// PerformCmBundlePush performs the cluster manager bundle push operation.
+// It is declared as a variable (not a function) so unit tests can mock it.
+var PerformCmBundlePush = func(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) error {
if !cr.Status.BundlePushTracker.NeedToPushManagerApps {
return nil
}
@@ -375,8 +378,11 @@ func PerformCmBundlePush(ctx context.Context, c splcommon.ControllerClient, cr *
// for the configMap update to the Pod before proceeding for the manager apps
// bundle push.
- cmPodName := fmt.Sprintf("splunk-%s-%s-0", cr.GetName(), "cluster-manager")
- podExecClient := splutil.GetPodExecClient(c, cr, cmPodName)
+ // Create podExecClient if not provided
+ if podExecClient == nil {
+ cmPodName := fmt.Sprintf("splunk-%s-%s-0", cr.GetName(), "cluster-manager")
+ podExecClient = splutil.GetPodExecClient(c, cr, cmPodName)
+ }
err := CheckIfsmartstoreConfigMapUpdatedToPod(ctx, c, cr, podExecClient)
if err != nil {
return err
@@ -401,7 +407,9 @@ func PerformCmBundlePush(ctx context.Context, c splcommon.ControllerClient, cr *
func PushManagerAppsBundle(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) error {
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("PushManagerApps").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
- eventPublisher, _ := newK8EventPublisher(c, cr)
+
+ // Get event publisher from context
+ eventPublisher := GetEventPublisher(ctx, cr)
defaultSecretObjName := splcommon.GetNamespaceScopedSecretName(cr.GetNamespace())
defaultSecret, err := splutil.GetSecretByName(ctx, c, cr.GetNamespace(), defaultSecretObjName)
@@ -413,7 +421,6 @@ func PushManagerAppsBundle(ctx context.Context, c splcommon.ControllerClient, cr
//Get the admin password from the secret object
adminPwd, foundSecret := defaultSecret.Data["password"]
if !foundSecret {
- eventPublisher.Warning(ctx, "PushManagerAppsBundle", "could not find admin password while trying to push the manager apps bundle")
return fmt.Errorf("could not find admin password while trying to push the manager apps bundle")
}
@@ -429,7 +436,7 @@ func PushManagerAppsBundle(ctx context.Context, c splcommon.ControllerClient, cr
}
// helper function to get the list of ClusterManager types in the current namespace
-func getClusterManagerList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) (int, error) {
+func getClusterManagerList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []rclient.ListOption) (int, error) {
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("getClusterManagerList").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
@@ -477,7 +484,9 @@ var GetCMMultisiteEnvVarsCall = func(ctx context.Context, cr *enterpriseApi.Clus
func changeClusterManagerAnnotations(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error {
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("changeClusterManagerAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
- eventPublisher, _ := newK8EventPublisher(c, cr)
+
+ // Get event publisher from context
+ eventPublisher := GetEventPublisher(ctx, cr)
clusterManagerInstance := &enterpriseApi.ClusterManager{}
if len(cr.Spec.ClusterManagerRef.Name) > 0 {
diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go
index 586adb316..4a71bf8be 100644
--- a/pkg/splunk/enterprise/clustermanager_test.go
+++ b/pkg/splunk/enterprise/clustermanager_test.go
@@ -36,6 +36,7 @@ import (
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/tools/record"
runtime "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -55,6 +56,11 @@ func TestApplyClusterManager(t *testing.T) {
return nil
}
+ // Mock the event publisher to return a valid (empty) publisher in tests
+ newK8EventPublisher = func(recorder record.EventRecorder, instance pkgruntime.Object) (*K8EventPublisher, error) {
+ return &K8EventPublisher{}, nil
+ }
+
GetCMMultisiteEnvVarsCall = func(ctx context.Context, cr *enterpriseApi.ClusterManager, namespaceScopedSecret *corev1.Secret) ([]corev1.EnvVar, error) {
extraEnv := getClusterManagerExtraEnv(cr, &cr.Spec.CommonSplunkSpec)
return extraEnv, nil
@@ -137,7 +143,7 @@ func TestApplyClusterManager(t *testing.T) {
revised.Spec.Image = "splunk/test"
revised.SetGroupVersionKind(gvk)
reconcile := func(c *spltest.MockClient, cr interface{}) error {
- _, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager))
+ _, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager), nil)
return err
}
spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyClusterManager", ¤t, revised, createCalls, updateCalls, reconcile, true)
@@ -147,7 +153,7 @@ func TestApplyClusterManager(t *testing.T) {
revised.ObjectMeta.DeletionTimestamp = ¤tTime
revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"}
deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) {
- _, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager))
+ _, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager), nil)
return true, err
}
splunkDeletionTester(t, revised, deleteFunc)
@@ -159,7 +165,7 @@ func TestApplyClusterManager(t *testing.T) {
c := spltest.NewMockClient()
_ = errors.New(splcommon.Rerr)
current.Kind = "ClusterManager"
- _, err := ApplyClusterManager(ctx, c, ¤t)
+ _, err := ApplyClusterManager(ctx, c, ¤t, nil)
if err == nil {
t.Errorf("Expected error")
}
@@ -226,7 +232,7 @@ func TestApplyClusterManager(t *testing.T) {
}
current.Kind = "ClusterManager"
- _, err = ApplyClusterManager(ctx, c, ¤t)
+ _, err = ApplyClusterManager(ctx, c, ¤t, nil)
if err == nil {
t.Errorf("Expected error")
}
@@ -243,7 +249,7 @@ func TestApplyClusterManager(t *testing.T) {
current.Status.SmartStore.VolList[0].SecretRef = "s3-secret"
current.Status.ResourceRevMap["s3-secret"] = "v2"
current.Kind = "ClusterManager"
- _, err = ApplyClusterManager(ctx, c, ¤t)
+ _, err = ApplyClusterManager(ctx, c, ¤t, nil)
if err == nil {
t.Errorf("Expected error")
}
@@ -258,7 +264,7 @@ func TestApplyClusterManager(t *testing.T) {
current.Spec.SmartStore.VolList[0].SecretRef = ""
current.Spec.SmartStore.Defaults.IndexAndGlobalCommonSpec.VolName = "msos_s2s3_vol"
current.Kind = "ClusterManager"
- _, err = ApplyClusterManager(ctx, c, ¤t)
+ _, err = ApplyClusterManager(ctx, c, ¤t, nil)
if err != nil {
t.Errorf("Don't expected error here")
}
@@ -315,7 +321,7 @@ func TestApplyClusterManager(t *testing.T) {
},
}
current.Kind = "ClusterManager"
- _, err = ApplyClusterManager(ctx, c, ¤t)
+ _, err = ApplyClusterManager(ctx, c, ¤t, nil)
if err == nil {
t.Errorf("Expected error")
}
@@ -333,7 +339,7 @@ func TestApplyClusterManager(t *testing.T) {
rerr := errors.New(splcommon.Rerr)
c.InduceErrorKind[splcommon.MockClientInduceErrorGet] = rerr
current.Kind = "ClusterManager"
- _, err = ApplyClusterManager(ctx, c, ¤t)
+ _, err = ApplyClusterManager(ctx, c, ¤t, nil)
if err == nil {
t.Errorf("Expected error")
}
@@ -535,7 +541,7 @@ func TestClusterManagerSpecNotCreatedWithoutGeneralTerms(t *testing.T) {
c := spltest.NewMockClient()
// Attempt to apply the cluster manager spec
- _, err := ApplyClusterManager(ctx, c, &cm)
+ _, err := ApplyClusterManager(ctx, c, &cm, nil)
// Assert that an error is returned
if err == nil {
@@ -663,7 +669,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
// Without S3 keys, ApplyClusterManager should fail
current.Kind = "ClusterManager"
- _, err := ApplyClusterManager(ctx, client, ¤t)
+ _, err := ApplyClusterManager(ctx, client, ¤t, nil)
if err == nil {
t.Errorf("ApplyClusterManager should fail without S3 secrets configured")
}
@@ -693,7 +699,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
revised.Spec.Image = "splunk/test"
reconcile := func(c *spltest.MockClient, cr interface{}) error {
current.Kind = "ClusterManager"
- _, err := ApplyClusterManager(context.Background(), c, cr.(*enterpriseApi.ClusterManager))
+ _, err := ApplyClusterManager(context.Background(), c, cr.(*enterpriseApi.ClusterManager), nil)
return err
}
@@ -721,12 +727,12 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
current.Status.BundlePushTracker.NeedToPushManagerApps = true
current.Kind = "ClusterManager"
- if _, err = ApplyClusterManager(context.Background(), client, ¤t); err != nil {
+ if _, err = ApplyClusterManager(context.Background(), client, ¤t, nil); err != nil {
t.Errorf("ApplyClusterManager() should not have returned error")
}
current.Spec.CommonSplunkSpec.EtcVolumeStorageConfig.StorageCapacity = "-abcd"
- if _, err := ApplyClusterManager(context.Background(), client, ¤t); err == nil {
+ if _, err := ApplyClusterManager(context.Background(), client, ¤t, nil); err == nil {
t.Errorf("ApplyClusterManager() should have returned error")
}
@@ -736,7 +742,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
ss.Spec.Replicas = &replicas
ss.Spec.Template.Spec.Containers[0].Image = "splunk/splunk"
client.AddObject(ss)
- if result, err := ApplyClusterManager(context.Background(), client, &current); err == nil && !result.Requeue {
+ if result, err := ApplyClusterManager(context.Background(), client, &current, nil); err == nil && !result.Requeue {
t.Errorf("ApplyClusterManager() should have returned error or result.requeue should have been false")
}
@@ -746,7 +752,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
client.AddObjects(objects)
current.Spec.CommonSplunkSpec.Mock = false
- if _, err := ApplyClusterManager(context.Background(), client, &current); err == nil {
+ if _, err := ApplyClusterManager(context.Background(), client, &current, nil); err == nil {
t.Errorf("ApplyClusterManager() should have returned error")
}
}
@@ -774,7 +780,7 @@ func TestPerformCmBundlePush(t *testing.T) {
// When the secret object is not present, should return an error
current.Status.BundlePushTracker.NeedToPushManagerApps = true
- err := PerformCmBundlePush(ctx, client, &current)
+ err := PerformCmBundlePush(ctx, client, &current, nil)
if err == nil {
t.Errorf("Should return error, when the secret object is not present")
}
@@ -806,28 +812,28 @@ func TestPerformCmBundlePush(t *testing.T) {
//Re-attempting to push the CM bundle in less than 5 seconds should return an error
current.Status.BundlePushTracker.LastCheckInterval = time.Now().Unix() - 1
- err = PerformCmBundlePush(ctx, client, &current)
+ err = PerformCmBundlePush(ctx, client, &current, nil)
if err == nil {
t.Errorf("Bundle Push Should fail, if attempted to push within 5 seconds interval")
}
//Re-attempting to push the CM bundle after 5 seconds passed, should not return an error
current.Status.BundlePushTracker.LastCheckInterval = time.Now().Unix() - 10
- err = PerformCmBundlePush(ctx, client, &current)
+ err = PerformCmBundlePush(ctx, client, &current, nil)
if err != nil && strings.HasPrefix(err.Error(), "Will re-attempt to push the bundle after the 5 seconds") {
t.Errorf("Bundle Push Should not fail if reattempted after 5 seconds interval passed. Error: %s", err.Error())
}
// When the CM Bundle push is not pending, should not return an error
current.Status.BundlePushTracker.NeedToPushManagerApps = false
- err = PerformCmBundlePush(ctx, client, &current)
+ err = PerformCmBundlePush(ctx, client, &current, nil)
if err != nil {
t.Errorf("Should not return an error when the Bundle push is not required. Error: %s", err.Error())
}
// Negative testing
current.Status.BundlePushTracker.NeedToPushManagerApps = true
- err = PerformCmBundlePush(ctx, client, &current)
+ err = PerformCmBundlePush(ctx, client, &current, nil)
if err != nil && strings.HasPrefix(err.Error(), "Will re-attempt to push the bundle after the 5 seconds") {
t.Errorf("Bundle Push Should not fail if reattempted after 5 seconds interval passed. Error: %s", err.Error())
}
@@ -958,7 +964,7 @@ func TestAppFrameworkApplyClusterManagerShouldNotFail(t *testing.T) {
}
cm.Kind = "ClusterManager"
- _, err = ApplyClusterManager(context.Background(), client, &cm)
+ _, err = ApplyClusterManager(context.Background(), client, &cm, nil)
if err != nil {
t.Errorf("ApplyClusterManager should not have returned error here.")
}
@@ -1061,7 +1067,7 @@ func TestApplyClusterManagerDeletion(t *testing.T) {
t.Errorf("Unable to create download directory for apps :%s", splcommon.AppDownloadVolume)
}
cm.Kind = "ClusterManager"
- _, err = ApplyClusterManager(ctx, c, &cm)
+ _, err = ApplyClusterManager(ctx, c, &cm, nil)
if err != nil {
t.Errorf("ApplyClusterManager should not have returned error here.")
}
@@ -1576,7 +1582,7 @@ func TestIsClusterManagerReadyForUpgrade(t *testing.T) {
cm.Kind = "ClusterManager"
client.Create(ctx, &cm)
- _, err = ApplyClusterManager(ctx, client, &cm)
+ _, err = ApplyClusterManager(ctx, client, &cm, nil)
if err != nil {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
}
@@ -1715,7 +1721,7 @@ func TestChangeClusterManagerAnnotations(t *testing.T) {
cm.Kind = "ClusterManager"
client.Create(ctx, cm)
- _, err = ApplyClusterManager(ctx, client, cm)
+ _, err = ApplyClusterManager(ctx, client, cm, nil)
if err != nil {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
}
@@ -1746,6 +1752,41 @@ func TestClusterManagerWitReadyState(t *testing.T) {
newpath := filepath.Join("/tmp", "appframework")
_ = os.MkdirAll(newpath, os.ModePerm)
+ // Mock GetCMMultisiteEnvVarsCall to avoid 5-second HTTP timeout
+ // This function tries to connect to Splunk REST API which doesn't exist in unit tests
+ GetCMMultisiteEnvVarsCall = func(ctx context.Context, cr *enterpriseApi.ClusterManager, namespaceScopedSecret *corev1.Secret) ([]corev1.EnvVar, error) {
+ extraEnv := getClusterManagerExtraEnv(cr, &cr.Spec.CommonSplunkSpec)
+ return extraEnv, nil
+ }
+
+ savedPerformCmBundlePush := PerformCmBundlePush
+ PerformCmBundlePush = func(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) error {
+ // Just set the flag to false to simulate successful bundle push
+ cr.Status.BundlePushTracker.NeedToPushManagerApps = false
+ return nil
+ }
+ defer func() { PerformCmBundlePush = savedPerformCmBundlePush }()
+
+ // Mock GetPodExecClient to return a mock client that simulates pod operations locally
+ savedGetPodExecClient := splutil.GetPodExecClient
+ splutil.GetPodExecClient = func(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) splutil.PodExecClientImpl {
+ mockClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: cr,
+ TargetPodName: targetPodName,
+ }
+ // Add mock responses for common commands
+ ctx := context.TODO()
+ // Mock mkdir command (used by createDirOnSplunkPods)
+ mockClient.AddMockPodExecReturnContext(ctx, "", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "",
+ Err: nil,
+ })
+ return mockClient
+ }
+ defer func() { splutil.GetPodExecClient = savedGetPodExecClient }()
+
// adding getapplist to fix test case
GetAppsList = func(ctx context.Context, remoteDataClientMgr RemoteDataClientManager) (splclient.RemoteDataListResponse, error) {
remoteDataListResponse := splclient.RemoteDataListResponse{}
@@ -1870,7 +1911,7 @@ func TestClusterManagerWitReadyState(t *testing.T) {
// simulate create clustermanager instance before reconcilation
c.Create(ctx, clustermanager)
- _, err := ApplyClusterManager(ctx, c, clustermanager)
+ _, err := ApplyClusterManager(ctx, c, clustermanager, nil)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for clustermanager with app framework %v", err)
debug.PrintStack()
@@ -1913,7 +1954,7 @@ func TestClusterManagerWitReadyState(t *testing.T) {
// call reconciliation
clustermanager.Kind = "ClusterManager"
- _, err = ApplyClusterManager(ctx, c, clustermanager)
+ _, err = ApplyClusterManager(ctx, c, clustermanager, nil)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
debug.PrintStack()
@@ -2032,7 +2073,7 @@ func TestClusterManagerWitReadyState(t *testing.T) {
// call reconciliation
clustermanager.Kind = "ClusterManager"
- _, err = ApplyClusterManager(ctx, c, clustermanager)
+ _, err = ApplyClusterManager(ctx, c, clustermanager, nil)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
debug.PrintStack()
diff --git a/pkg/splunk/enterprise/clustermaster.go b/pkg/splunk/enterprise/clustermaster.go
index 496ecb31e..88e2c3815 100644
--- a/pkg/splunk/enterprise/clustermaster.go
+++ b/pkg/splunk/enterprise/clustermaster.go
@@ -47,7 +47,8 @@ func ApplyClusterMaster(ctx context.Context, client splcommon.ControllerClient,
}
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("ApplyClusterMaster")
- eventPublisher, _ := newK8EventPublisher(client, cr)
+
+ eventPublisher := GetEventPublisher(ctx, cr)
cr.Kind = "ClusterMaster"
if cr.Status.ResourceRevMap == nil {
@@ -303,7 +304,9 @@ func getClusterMasterStatefulSet(ctx context.Context, client splcommon.Controlle
func CheckIfMastersmartstoreConfigMapUpdatedToPod(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApiV3.ClusterMaster, podExecClient splutil.PodExecClientImpl) error {
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("CheckIfMastersmartstoreConfigMapUpdatedToPod").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
- eventPublisher, _ := newK8EventPublisher(c, cr)
+
+ // Get event publisher from context
+ eventPublisher := GetEventPublisher(ctx, cr)
command := fmt.Sprintf("cat /mnt/splunk-operator/local/%s", configToken)
streamOptions := splutil.NewStreamOptionsObject(command)
@@ -331,7 +334,8 @@ func CheckIfMastersmartstoreConfigMapUpdatedToPod(ctx context.Context, c splcomm
}
// PerformCmasterBundlePush initiates the bundle push from cluster manager
-func PerformCmasterBundlePush(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApiV3.ClusterMaster) error {
+// Defined as a variable to allow mocking in unit tests
+var PerformCmasterBundlePush = func(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApiV3.ClusterMaster) error {
if !cr.Status.BundlePushTracker.NeedToPushMasterApps {
return nil
}
@@ -382,7 +386,9 @@ func PerformCmasterBundlePush(ctx context.Context, c splcommon.ControllerClient,
func PushMasterAppsBundle(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApiV3.ClusterMaster) error {
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("PushMasterApps").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
- eventPublisher, _ := newK8EventPublisher(c, cr)
+
+ // Get event publisher from context
+ eventPublisher := GetEventPublisher(ctx, cr)
defaultSecretObjName := splcommon.GetNamespaceScopedSecretName(cr.GetNamespace())
defaultSecret, err := splutil.GetSecretByName(ctx, c, cr.GetNamespace(), defaultSecretObjName)
@@ -428,7 +434,8 @@ func getClusterMasterList(ctx context.Context, c splcommon.ControllerClient, cr
}
// VerifyCMasterisMultisite checks if its a multisite
-func VerifyCMasterisMultisite(ctx context.Context, cr *enterpriseApiV3.ClusterMaster, namespaceScopedSecret *corev1.Secret) ([]corev1.EnvVar, error) {
+// Defined as a variable to allow mocking in unit tests
+var VerifyCMasterisMultisite = func(ctx context.Context, cr *enterpriseApiV3.ClusterMaster, namespaceScopedSecret *corev1.Secret) ([]corev1.EnvVar, error) {
var err error
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("Verify if Multisite Indexer Cluster").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
diff --git a/pkg/splunk/enterprise/clustermaster_test.go b/pkg/splunk/enterprise/clustermaster_test.go
index fea79b90d..f9b49b496 100644
--- a/pkg/splunk/enterprise/clustermaster_test.go
+++ b/pkg/splunk/enterprise/clustermaster_test.go
@@ -241,6 +241,27 @@ func TestClusterMasterSpecNotCreatedWithoutGeneralTerms(t *testing.T) {
func TestApplyClusterMasterWithSmartstore(t *testing.T) {
os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ // Mock VerifyCMasterisMultisite to avoid 5-second HTTP timeout
+ // This function tries to connect to Splunk REST API which doesn't exist in unit tests
+ savedVerifyCMasterisMultisite := VerifyCMasterisMultisite
+ defer func() { VerifyCMasterisMultisite = savedVerifyCMasterisMultisite }()
+ VerifyCMasterisMultisite = func(ctx context.Context, cr *enterpriseApiV3.ClusterMaster, namespaceScopedSecret *corev1.Secret) ([]corev1.EnvVar, error) {
+ extraEnv := getClusterMasterExtraEnv(cr, &cr.Spec.CommonSplunkSpec)
+ return extraEnv, nil
+ }
+
+ // Mock PerformCmasterBundlePush to avoid pod exec operations
+ // When Mock=false and NeedToPushMasterApps=true, return error to simulate test expectations
+ savedPerformCmasterBundlePush := PerformCmasterBundlePush
+ defer func() { PerformCmasterBundlePush = savedPerformCmasterBundlePush }()
+ PerformCmasterBundlePush = func(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApiV3.ClusterMaster) error {
+ if !cr.Spec.CommonSplunkSpec.Mock && cr.Status.BundlePushTracker.NeedToPushMasterApps {
+ return fmt.Errorf("simulated bundle push error when Mock=false")
+ }
+ return nil
+ }
+
ctx := context.TODO()
funcCalls := []spltest.MockFuncCall{
{MetaName: "*v1.Secret-test-splunk-test-secret"},
@@ -1161,16 +1182,51 @@ func TestCheckIfMastersmartstoreConfigMapUpdatedToPod(t *testing.T) {
func TestClusterMasterWitReadyState(t *testing.T) {
os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ // Mock VerifyCMasterisMultisite to avoid 5-second HTTP timeout
+ // This function tries to connect to Splunk REST API which doesn't exist in unit tests
+ savedVerifyCMasterisMultisiteForReadyState := VerifyCMasterisMultisite
+ defer func() { VerifyCMasterisMultisite = savedVerifyCMasterisMultisiteForReadyState }()
+ VerifyCMasterisMultisite = func(ctx context.Context, cr *enterpriseApiV3.ClusterMaster, namespaceScopedSecret *corev1.Secret) ([]corev1.EnvVar, error) {
+ extraEnv := getClusterMasterExtraEnv(cr, &cr.Spec.CommonSplunkSpec)
+ return extraEnv, nil
+ }
+
+ // Initialize GlobalResourceTracker to enable app framework
+ initGlobalResourceTracker()
+
// create directory for app framework
newpath := filepath.Join("/tmp", "appframework")
_ = os.MkdirAll(newpath, os.ModePerm)
// adding getapplist to fix test case
+ savedGetAppsListForReadyState := GetAppsList
+ defer func() { GetAppsList = savedGetAppsListForReadyState }()
GetAppsList = func(ctx context.Context, remoteDataClientMgr RemoteDataClientManager) (splclient.RemoteDataListResponse, error) {
RemoteDataListResponse := splclient.RemoteDataListResponse{}
return RemoteDataListResponse, nil
}
+ // Mock GetPodExecClient to return a mock client that simulates pod operations locally
+ savedGetPodExecClient := splutil.GetPodExecClient
+ splutil.GetPodExecClient = func(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) splutil.PodExecClientImpl {
+ mockClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: cr,
+ TargetPodName: targetPodName,
+ }
+ // Add mock responses for common commands
+ ctx := context.TODO()
+ // Mock mkdir command (used by createDirOnSplunkPods)
+ mockClient.AddMockPodExecReturnContext(ctx, "mkdir -p", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "",
+ Err: nil,
+ })
+ return mockClient
+ }
+ defer func() { splutil.GetPodExecClient = savedGetPodExecClient }()
+
sch := pkgruntime.NewScheme()
utilruntime.Must(clientgoscheme.AddToScheme(sch))
utilruntime.Must(corev1.AddToScheme(sch))
diff --git a/pkg/splunk/enterprise/configuration.go b/pkg/splunk/enterprise/configuration.go
index 78ed47b2f..cb9322f0a 100644
--- a/pkg/splunk/enterprise/configuration.go
+++ b/pkg/splunk/enterprise/configuration.go
@@ -85,6 +85,13 @@ var defaultStartupProbe corev1.Probe = corev1.Probe{
},
}
+const (
+ defaultRequestsCPU = "0.1"
+ defaultRequestsMemory = "512Mi"
+ defaultLimitsCPU = "4"
+ defaultLimitsMemory = "8Gi"
+)
+
// getSplunkLabels returns a map of labels to use for Splunk Enterprise components.
func getSplunkLabels(instanceIdentifier string, instanceType InstanceType, partOfIdentifier string) map[string]string {
// For multisite / multipart IndexerCluster, the name of the part containing the cluster-manager is used
@@ -257,7 +264,7 @@ func setVolumeDefaults(spec *enterpriseApi.CommonSplunkSpec) {
for _, v := range spec.Volumes {
if v.Secret != nil {
if v.Secret.DefaultMode == nil {
- perm := int32(corev1.SecretVolumeSourceDefaultMode)
+ perm := corev1.SecretVolumeSourceDefaultMode
v.Secret.DefaultMode = &perm
}
continue
@@ -265,7 +272,7 @@ func setVolumeDefaults(spec *enterpriseApi.CommonSplunkSpec) {
if v.ConfigMap != nil {
if v.ConfigMap.DefaultMode == nil {
- perm := int32(corev1.ConfigMapVolumeSourceDefaultMode)
+ perm := corev1.ConfigMapVolumeSourceDefaultMode
v.ConfigMap.DefaultMode = &perm
}
continue
@@ -366,12 +373,12 @@ func validateCommonSplunkSpec(ctx context.Context, c splcommon.ControllerClient,
defaultResources := corev1.ResourceRequirements{
Requests: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("0.1"),
- corev1.ResourceMemory: resource.MustParse("512Mi"),
+ corev1.ResourceCPU: resource.MustParse(defaultRequestsCPU),
+ corev1.ResourceMemory: resource.MustParse(defaultRequestsMemory),
},
Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("4"),
- corev1.ResourceMemory: resource.MustParse("8Gi"),
+ corev1.ResourceCPU: resource.MustParse(defaultLimitsCPU),
+ corev1.ResourceMemory: resource.MustParse(defaultLimitsMemory),
},
}
@@ -391,11 +398,11 @@ func validateCommonSplunkSpec(ctx context.Context, c splcommon.ControllerClient,
}
if spec.LivenessInitialDelaySeconds < 0 {
- return fmt.Errorf("negative value (%d) is not allowed for Liveness probe intial delay", spec.LivenessInitialDelaySeconds)
+ return fmt.Errorf("negative value (%d) is not allowed for Liveness probe initial delay", spec.LivenessInitialDelaySeconds)
}
if spec.ReadinessInitialDelaySeconds < 0 {
- return fmt.Errorf("negative value (%d) is not allowed for Readiness probe intial delay", spec.ReadinessInitialDelaySeconds)
+ return fmt.Errorf("negative value (%d) is not allowed for Readiness probe initial delay", spec.ReadinessInitialDelaySeconds)
}
err = validateSplunkGeneralTerms()
@@ -467,6 +474,9 @@ func getSplunkPorts(instanceType InstanceType) map[string]int {
case SplunkIndexer:
result[GetPortName(hecPort, protoHTTP)] = 8088
result[GetPortName(s2sPort, protoTCP)] = 9997
+ case SplunkIngestor:
+ result[GetPortName(hecPort, protoHTTP)] = 8088
+ result[GetPortName(s2sPort, protoTCP)] = 9997
}
return result
@@ -833,7 +843,7 @@ func updateSplunkPodTemplateWithConfig(ctx context.Context, client splcommon.Con
}
// Explicitly set the default value here so we can compare for changes correctly with current statefulset.
- secretVolDefaultMode := int32(corev1.SecretVolumeSourceDefaultMode)
+ secretVolDefaultMode := corev1.SecretVolumeSourceDefaultMode
addSplunkVolumeToTemplate(podTemplateSpec, "mnt-splunk-secrets", "/mnt/splunk-secrets", corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretToMount,
@@ -842,7 +852,7 @@ func updateSplunkPodTemplateWithConfig(ctx context.Context, client splcommon.Con
})
// Explicitly set the default value here so we can compare for changes correctly with current statefulset.
- configMapVolDefaultMode := int32(corev1.ConfigMapVolumeSourceDefaultMode)
+ configMapVolDefaultMode := corev1.ConfigMapVolumeSourceDefaultMode
// add inline defaults to all splunk containers other than MC(where CR spec defaults are not needed)
if spec.Defaults != "" {
@@ -927,10 +937,6 @@ func updateSplunkPodTemplateWithConfig(ctx context.Context, client splcommon.Con
if instanceType == SplunkStandalone && (len(spec.ClusterMasterRef.Name) > 0 || len(spec.ClusterManagerRef.Name) > 0) {
role = SplunkSearchHead.ToRole()
}
- domainName := os.Getenv("CLUSTER_DOMAIN")
- if domainName == "" {
- domainName = "cluster.local"
- }
env := []corev1.EnvVar{
{Name: "SPLUNK_HOME", Value: "/opt/splunk"},
{Name: "SPLUNK_START_ARGS", Value: "--accept-license"},
@@ -1901,7 +1907,7 @@ maxGlobalDataSizeMB = %d`, indexesConf, indexes[i].MaxGlobalDataSizeMB)
maxGlobalRawDataSizeMB = %d`, indexesConf, indexes[i].MaxGlobalRawDataSizeMB)
}
- // Add a new line in betwen index stanzas
+ // Add a new line in between index stanzas
// Do not add config beyond here
indexesConf = fmt.Sprintf(`%s
`, indexesConf)
diff --git a/pkg/splunk/enterprise/configuration_test.go b/pkg/splunk/enterprise/configuration_test.go
index 3be6d0393..2c92e3ec4 100644
--- a/pkg/splunk/enterprise/configuration_test.go
+++ b/pkg/splunk/enterprise/configuration_test.go
@@ -254,7 +254,7 @@ func TestSmartstoreApplyClusterManagerFailsOnInvalidSmartStoreConfig(t *testing.
client := spltest.NewMockClient()
- _, err := ApplyClusterManager(context.TODO(), client, &cr)
+ _, err := ApplyClusterManager(context.TODO(), client, &cr, nil)
if err == nil {
t.Errorf("ApplyClusterManager should fail on invalid smartstore config")
}
@@ -1816,3 +1816,18 @@ func TestValidateLivenessProbe(t *testing.T) {
t.Errorf("Unexpected error when less than deault values passed for livenessProbe InitialDelaySeconds %d, TimeoutSeconds %d, PeriodSeconds %d. Error %s", livenessProbe.InitialDelaySeconds, livenessProbe.TimeoutSeconds, livenessProbe.PeriodSeconds, err)
}
}
+
+func TestGetSplunkPorts(t *testing.T) {
+ test := func(instanceType InstanceType) {
+ ports := getSplunkPorts(instanceType)
+ require.Equal(t, 8000, ports["http-splunkweb"])
+ require.Equal(t, 8089, ports["https-splunkd"])
+ require.Equal(t, 8088, ports["http-hec"])
+ require.Equal(t, 9997, ports["tcp-s2s"])
+ }
+
+ test(SplunkStandalone)
+ test(SplunkIndexer)
+ test(SplunkIngestor)
+ test(SplunkMonitoringConsole)
+}
diff --git a/pkg/splunk/enterprise/events.go b/pkg/splunk/enterprise/events.go
index ebdc13d62..d1f7e721a 100644
--- a/pkg/splunk/enterprise/events.go
+++ b/pkg/splunk/enterprise/events.go
@@ -18,79 +18,77 @@ package enterprise
import (
"context"
- enterpriseApi "github.com/splunk/splunk-operator/api/v4"
-
- enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
- corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/log"
)
// K8EventPublisher structure used to publish k8s event
type K8EventPublisher struct {
- client splcommon.ControllerClient
- instance interface{}
+ recorder record.EventRecorder
+ instance runtime.Object
}
-// private function to get new k8s event publisher
-func newK8EventPublisher(client splcommon.ControllerClient, instance interface{}) (*K8EventPublisher, error) {
+// newK8EventPublisher creates a new k8s event publisher (variable to allow mocking in tests)
+var newK8EventPublisher = func(recorder record.EventRecorder, instance runtime.Object) (*K8EventPublisher, error) {
eventPublisher := &K8EventPublisher{
- client: client,
+ recorder: recorder,
instance: instance,
}
return eventPublisher, nil
}
-// publishEvents adds events to k8s
-func (k *K8EventPublisher) publishEvent(ctx context.Context, eventType, reason, message string) {
-
- var event corev1.Event
+// NewK8EventPublisherWithRecorder creates a new k8s event publisher with recorder (exported for controller use)
+func NewK8EventPublisherWithRecorder(recorder record.EventRecorder, instance runtime.Object) (*K8EventPublisher, error) {
+ return newK8EventPublisher(recorder, instance)
+}
- // in the case of testing, client is not passed
- if k.client == nil {
+// publishEvent adds events to k8s using event recorder
+func (k *K8EventPublisher) publishEvent(ctx context.Context, eventType, reason, message string) {
+ if k == nil {
return
}
- // based on the custom resource instance type find name, type and create new event
- switch v := k.instance.(type) {
- case *enterpriseApi.Standalone:
- event = v.NewEvent(eventType, reason, message)
- case *enterpriseApiV3.LicenseMaster:
- event = v.NewEvent(eventType, reason, message)
- case *enterpriseApi.LicenseManager:
- event = v.NewEvent(eventType, reason, message)
- case *enterpriseApi.IndexerCluster:
- event = v.NewEvent(eventType, reason, message)
- case *enterpriseApi.ClusterManager:
- event = v.NewEvent(eventType, reason, message)
- case *enterpriseApiV3.ClusterMaster:
- event = v.NewEvent(eventType, reason, message)
- case *enterpriseApi.MonitoringConsole:
- event = v.NewEvent(eventType, reason, message)
- case *enterpriseApi.SearchHeadCluster:
- event = v.NewEvent(eventType, reason, message)
- default:
+ // in the case of testing, recorder is not passed
+ if k.recorder == nil {
return
}
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("PublishEvent")
- scopedLog.Info("publishing event", "reason", event.Reason, "message", event.Message)
+ scopedLog.Info("publishing event", "eventType", eventType, "reason", reason, "message", message)
- err := k.client.Create(ctx, &event)
- if err != nil {
- scopedLog.Error(err, "failed to record event, ignoring",
- "reason", event.Reason, "message", event.Message, "error", err)
- }
+ // Use the EventRecorder to emit the event
+ k.recorder.Event(k.instance, eventType, reason, message)
}
// Normal publish normal events to k8s
func (k *K8EventPublisher) Normal(ctx context.Context, reason, message string) {
- k.publishEvent(ctx, corev1.EventTypeNormal, reason, message)
+ k.publishEvent(ctx, "Normal", reason, message)
}
// Warning publish warning events to k8s
func (k *K8EventPublisher) Warning(ctx context.Context, reason, message string) {
- k.publishEvent(ctx, corev1.EventTypeWarning, reason, message)
+ k.publishEvent(ctx, "Warning", reason, message)
+}
+
+// GetEventPublisher returns an event publisher from context if available,
+// otherwise creates a new one. This is a shared helper to avoid code duplication.
+func GetEventPublisher(ctx context.Context, cr runtime.Object) *K8EventPublisher {
+ // First check if there's already an event publisher in context
+ if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
+ if eventPublisher, ok := pub.(*K8EventPublisher); ok {
+ return eventPublisher
+ }
+ }
+
+ // Otherwise, create a new one from the recorder in context
+ var recorder record.EventRecorder
+ if rec := ctx.Value(splcommon.EventRecorderKey); rec != nil {
+ recorder, _ = rec.(record.EventRecorder)
+ }
+ eventPublisher, _ := newK8EventPublisher(recorder, cr)
+ return eventPublisher
}
diff --git a/pkg/splunk/enterprise/events_test.go b/pkg/splunk/enterprise/events_test.go
index 18811bdd2..bc2087407 100644
--- a/pkg/splunk/enterprise/events_test.go
+++ b/pkg/splunk/enterprise/events_test.go
@@ -19,25 +19,17 @@ import (
"context"
"testing"
- "github.com/pkg/errors"
enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
- spltest "github.com/splunk/splunk-operator/pkg/splunk/test"
-
- "sigs.k8s.io/controller-runtime/pkg/client/fake"
+ "k8s.io/client-go/tools/record"
)
-func init() {
-}
-
func TestClusterManagerEventPublisher(t *testing.T) {
-
- builder := fake.NewClientBuilder()
- c := builder.Build()
+ recorder := record.NewFakeRecorder(10)
cm := enterpriseApi.ClusterManager{}
- k8sevent, err := newK8EventPublisher(c, &cm)
+ k8sevent, err := newK8EventPublisher(recorder, &cm)
if err != nil {
t.Errorf("Unexpected error while creating new event publisher %v", err)
}
@@ -52,12 +44,10 @@ func TestClusterManagerEventPublisher(t *testing.T) {
}
func TestIndexerClusterEventPublisher(t *testing.T) {
-
- builder := fake.NewClientBuilder()
- c := builder.Build()
+ recorder := record.NewFakeRecorder(10)
cm := enterpriseApi.IndexerCluster{}
- k8sevent, err := newK8EventPublisher(c, &cm)
+ k8sevent, err := newK8EventPublisher(recorder, &cm)
if err != nil {
t.Errorf("Unexpected error while creating new event publisher %v", err)
}
@@ -67,12 +57,10 @@ func TestIndexerClusterEventPublisher(t *testing.T) {
}
func TestMonitoringConsoleEventPublisher(t *testing.T) {
-
- builder := fake.NewClientBuilder()
- c := builder.Build()
+ recorder := record.NewFakeRecorder(10)
cm := enterpriseApi.MonitoringConsole{}
- k8sevent, err := newK8EventPublisher(c, &cm)
+ k8sevent, err := newK8EventPublisher(recorder, &cm)
if err != nil {
t.Errorf("Unexpected error while creating new event publisher %v", err)
}
@@ -82,12 +70,10 @@ func TestMonitoringConsoleEventPublisher(t *testing.T) {
}
func TestSearchHeadClusterEventPublisher(t *testing.T) {
-
- builder := fake.NewClientBuilder()
- c := builder.Build()
+ recorder := record.NewFakeRecorder(10)
cm := enterpriseApi.SearchHeadCluster{}
- k8sevent, err := newK8EventPublisher(c, &cm)
+ k8sevent, err := newK8EventPublisher(recorder, &cm)
if err != nil {
t.Errorf("Unexpected error while creating new event publisher %v", err)
}
@@ -97,12 +83,10 @@ func TestSearchHeadClusterEventPublisher(t *testing.T) {
}
func TestStandaloneEventPublisher(t *testing.T) {
-
- builder := fake.NewClientBuilder()
- c := builder.Build()
+ recorder := record.NewFakeRecorder(10)
cm := enterpriseApi.Standalone{}
- k8sevent, err := newK8EventPublisher(c, &cm)
+ k8sevent, err := newK8EventPublisher(recorder, &cm)
if err != nil {
t.Errorf("Unexpected error while creating new event publisher %v", err)
}
@@ -112,25 +96,20 @@ func TestStandaloneEventPublisher(t *testing.T) {
// Negative testing
ctx := context.TODO()
- k8sevent.client = nil
+ k8sevent.recorder = nil
k8sevent.publishEvent(ctx, "", "", "")
- mockClient := spltest.NewMockClient()
- mockClient.InduceErrorKind[splcommon.MockClientInduceErrorCreate] = errors.New(splcommon.Rerr)
- k8sevent.client = mockClient
- k8sevent.publishEvent(ctx, "", "", "")
-
- k8sevent.instance = "randomString"
- k8sevent.publishEvent(ctx, "", "", "")
+ // Test with different instance type (this should work with EventRecorder)
+ k8sevent.recorder = recorder
+ k8sevent.instance = &cm
+ k8sevent.publishEvent(ctx, "Normal", "TestReason", "Test message")
}
func TestLicenseManagerEventPublisher(t *testing.T) {
-
- builder := fake.NewClientBuilder()
- c := builder.Build()
+ recorder := record.NewFakeRecorder(10)
lmanager := enterpriseApi.LicenseManager{}
- k8sevent, err := newK8EventPublisher(c, &lmanager)
+ k8sevent, err := newK8EventPublisher(recorder, &lmanager)
if err != nil {
t.Errorf("Unexpected error while creating new event publisher %v", err)
}
@@ -142,4 +121,35 @@ func TestLicenseManagerEventPublisher(t *testing.T) {
lmaster := enterpriseApiV3.LicenseMaster{}
k8sevent.instance = &lmaster
k8sevent.Normal(ctx, "", "")
+
+}
+
+func TestGetEventPublisher(t *testing.T) {
+ recorder := record.NewFakeRecorder(10)
+ cm := &enterpriseApi.ClusterManager{}
+
+ // Test 1: GetEventPublisher with recorder in context
+ ctx := context.WithValue(context.TODO(), splcommon.EventRecorderKey, recorder)
+ eventPublisher := GetEventPublisher(ctx, cm)
+ if eventPublisher == nil {
+ t.Error("Expected non-nil event publisher")
+ }
+
+ // Test 2: GetEventPublisher with existing publisher in context
+ ctx = context.WithValue(context.TODO(), splcommon.EventPublisherKey, eventPublisher)
+ eventPublisher2 := GetEventPublisher(ctx, cm)
+ if eventPublisher2 != eventPublisher {
+ t.Error("Expected to get same event publisher from context")
+ }
+
+ // Test 3: GetEventPublisher with no recorder in context
+ ctx = context.TODO()
+ eventPublisher3 := GetEventPublisher(ctx, cm)
+ if eventPublisher3 == nil {
+ t.Error("Expected non-nil event publisher even without recorder")
+ }
+
+ // Test 4: Verify publisher works (no panic)
+ eventPublisher.Normal(context.TODO(), "TestReason", "Test message")
+ eventPublisher.Warning(context.TODO(), "TestReason", "Test warning")
}
diff --git a/pkg/splunk/enterprise/finalizers.go b/pkg/splunk/enterprise/finalizers.go
index 574ccf093..9ecbd0136 100644
--- a/pkg/splunk/enterprise/finalizers.go
+++ b/pkg/splunk/enterprise/finalizers.go
@@ -56,6 +56,8 @@ func DeleteSplunkPvc(ctx context.Context, cr splcommon.MetaObject, c splcommon.C
components = append(components, splcommon.ClusterManager)
case "MonitoringConsole":
components = append(components, "monitoring-console")
+ case "IngestorCluster":
+ components = append(components, "ingestor")
default:
scopedLog.Info("Skipping PVC removal")
return nil
diff --git a/pkg/splunk/enterprise/finalizers_test.go b/pkg/splunk/enterprise/finalizers_test.go
index 92c46f1e0..369271200 100644
--- a/pkg/splunk/enterprise/finalizers_test.go
+++ b/pkg/splunk/enterprise/finalizers_test.go
@@ -54,6 +54,8 @@ func splunkDeletionTester(t *testing.T, cr splcommon.MetaObject, delete func(spl
component = "cluster-master"
case "MonitoringConsole":
component = "monitoring-console"
+ case "IngestorCluster":
+ component = "ingestor"
}
labelsB := map[string]string{
@@ -306,6 +308,19 @@ func splunkDeletionTester(t *testing.T, cr splcommon.MetaObject, delete func(spl
{MetaName: "*v4.IndexerCluster-test-stack1"},
{MetaName: "*v4.IndexerCluster-test-stack1"},
}
+ case "IngestorCluster":
+ mockCalls["Create"] = []spltest.MockFuncCall{
+ {MetaName: "*v1.Secret-test-splunk-test-secret"},
+ {MetaName: "*v1.ConfigMap-test-splunk-ingestor-stack1-configmap"},
+ }
+ mockCalls["Get"] = []spltest.MockFuncCall{
+ {MetaName: "*v1.Secret-test-splunk-test-secret"},
+ {MetaName: "*v1.Secret-test-splunk-test-secret"},
+ {MetaName: "*v1.Secret-test-splunk-test-secret"},
+ {MetaName: "*v1.ConfigMap-test-splunk-ingestor-stack1-configmap"},
+ {MetaName: "*v4.IngestorCluster-test-stack1"},
+ {MetaName: "*v4.IngestorCluster-test-stack1"},
+ }
}
}
}
@@ -340,6 +355,8 @@ func splunkPVCDeletionTester(t *testing.T, cr splcommon.MetaObject, delete func(
component = "cluster-manager"
case "MonitoringConsole":
component = "monitoring-console"
+ case "IngestorCluster":
+ component = "ingestor"
}
labels := map[string]string{
@@ -544,4 +561,15 @@ func TestDeleteSplunkPvcError(t *testing.T) {
if err == nil {
t.Errorf("Expected error")
}
+
+ // IngestorCluster
+ icCr := &enterpriseApi.IngestorCluster{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "IngestorCluster",
+ },
+ }
+ err = DeleteSplunkPvc(ctx, icCr, c)
+ if err == nil {
+ t.Errorf("Expected error")
+ }
}
diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go
index 4215fa273..6700ccbfc 100644
--- a/pkg/splunk/enterprise/indexercluster.go
+++ b/pkg/splunk/enterprise/indexercluster.go
@@ -19,6 +19,7 @@ import (
"context"
"errors"
"fmt"
+ "log/slog"
"regexp"
"sort"
"strconv"
@@ -27,8 +28,8 @@ import (
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
- "github.com/go-logr/logr"
enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
+ "github.com/splunk/splunk-operator/pkg/logging"
splclient "github.com/splunk/splunk-operator/pkg/splunk/client"
splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller"
@@ -36,13 +37,12 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/controller-runtime/pkg/client"
rclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
-// NewSplunkClientFunc funciton pointer type
+// NewSplunkClientFunc function pointer type
type NewSplunkClientFunc func(managementURI, username, password string) *splclient.SplunkClient
// ApplyIndexerClusterManager reconciles the state of a Splunk Enterprise indexer cluster.
@@ -53,9 +53,10 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
Requeue: true,
RequeueAfter: time.Second * 5,
}
- reqLogger := log.FromContext(ctx)
- scopedLog := reqLogger.WithName("ApplyIndexerClusterManager").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
- eventPublisher, _ := newK8EventPublisher(client, cr)
+
+ logger := logging.FromContext(ctx).With("func", "ApplyIndexerClusterManager", "name", cr.GetName(), "namespace", cr.GetNamespace())
+
+ eventPublisher := GetEventPublisher(ctx, cr)
ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
cr.Kind = "IndexerCluster"
@@ -70,12 +71,17 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
err = validateIndexerClusterSpec(ctx, client, cr)
if err != nil {
eventPublisher.Warning(ctx, "validateIndexerClusterSpec", fmt.Sprintf("validate indexercluster spec failed %s", err.Error()))
- scopedLog.Error(err, "Failed to validate indexercluster spec")
+ logger.ErrorContext(ctx, "Failed to validate indexercluster spec", "error", err.Error())
return result, err
}
// updates status after function completes
cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError
+ if cr.Status.Replicas < cr.Spec.Replicas {
+ logger.InfoContext(ctx, "Scaling up indexer cluster", "previousReplicas", cr.Status.Replicas, "newReplicas", cr.Spec.Replicas)
+ cr.Status.CredentialSecretVersion = "0"
+ cr.Status.ServiceAccount = ""
+ }
cr.Status.Replicas = cr.Spec.Replicas
cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName())
if cr.Status.Peers == nil {
@@ -91,7 +97,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
// create or update general config resources
namespaceScopedSecret, err := ApplySplunkConfig(ctx, client, cr, cr.Spec.CommonSplunkSpec, SplunkIndexer)
if err != nil {
- scopedLog.Error(err, "create or update general config failed", "error", err.Error())
+ logger.ErrorContext(ctx, "create or update general config failed", "error", err.Error())
eventPublisher.Warning(ctx, "ApplySplunkConfig", fmt.Sprintf("create or update general config failed with error %s", err.Error()))
return result, err
}
@@ -111,11 +117,11 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
cr.Status.ClusterManagerPhase = managerIdxCluster.Status.Phase
}
} else {
- scopedLog.Error(nil, "The configured clusterMasterRef doesn't exist", "clusterManagerRef", cr.Spec.ClusterManagerRef.Name)
+ logger.WarnContext(ctx, "The configured clusterMasterRef doesn't exist", "clusterManagerRef", cr.Spec.ClusterManagerRef.Name)
cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError
}
- mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient)
+ mgr := newIndexerClusterPodManager(logger, cr, namespaceScopedSecret, splclient.NewSplunkClient, client)
// Check if we have configured enough number(<= RF) of replicas
if mgr.cr.Status.ClusterManagerPhase == enterpriseApi.PhaseReady {
err = VerifyRFPeers(ctx, mgr, client)
@@ -241,11 +247,57 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
// no need to requeue if everything is ready
if cr.Status.Phase == enterpriseApi.PhaseReady {
+ qosCfg, err := ResolveQueueAndObjectStorage(ctx, client, cr, cr.Spec.QueueRef, cr.Spec.ObjectStorageRef, cr.Spec.ServiceAccount)
+ if err != nil {
+ logger.ErrorContext(ctx, "Failed to resolve Queue/ObjectStorage config", "error", err.Error())
+ return result, err
+ }
+ logger.DebugContext(ctx, "Resolved Queue/ObjectStorage config", "queue", qosCfg.Queue, "objectStorage", qosCfg.OS, "version", qosCfg.Version, "serviceAccount", cr.Spec.ServiceAccount)
+
+ secretChanged := cr.Status.CredentialSecretVersion != qosCfg.Version
+ serviceAccountChanged := cr.Status.ServiceAccount != cr.Spec.ServiceAccount
+
+ logger.InfoContext(ctx, "Checking for changes", "previousCredentialSecretVersion", cr.Status.CredentialSecretVersion, "previousServiceAccount", cr.Status.ServiceAccount, "secretChanged", secretChanged, "serviceAccountChanged", serviceAccountChanged)
+
+ // If queue is updated
+ if cr.Spec.QueueRef.Name != "" {
+ if secretChanged || serviceAccountChanged {
+ mgr := newIndexerClusterPodManager(logger, cr, namespaceScopedSecret, splclient.NewSplunkClient, client)
+ err = mgr.updateIndexerConfFiles(ctx, cr, &qosCfg.Queue, &qosCfg.OS, qosCfg.AccessKey, qosCfg.SecretKey, client)
+ if err != nil {
+ eventPublisher.Warning(ctx, "UpdateConfFilesFailure", fmt.Sprintf("failed to update conf file for Queue/Pipeline config due to %s", err.Error()))
+ logger.ErrorContext(ctx, "Failed to update conf file for Queue/Pipeline config", "error", err.Error())
+ return result, err
+ }
+
+ eventPublisher.Normal(ctx, "QueueConfigUpdated",
+ fmt.Sprintf("Queue/Pipeline configuration updated for %d indexers", cr.Spec.Replicas))
+ logger.InfoContext(ctx, "Queue/Pipeline configuration updated", "readyReplicas", cr.Status.ReadyReplicas)
+
+ for i := int32(0); i < cr.Spec.Replicas; i++ {
+ idxcClient := mgr.getClient(ctx, i)
+ err = idxcClient.RestartSplunk()
+ if err != nil {
+ return result, err
+ }
+ logger.DebugContext(ctx, "Restarted splunk", "indexer", i)
+ }
+
+ eventPublisher.Normal(ctx, "IndexersRestarted",
+ fmt.Sprintf("Restarted Splunk on %d indexer pods", cr.Spec.Replicas))
+
+ cr.Status.CredentialSecretVersion = qosCfg.Version
+ cr.Status.ServiceAccount = cr.Spec.ServiceAccount
+
+ logger.InfoContext(ctx, "Updated status", "credentialSecretVersion", cr.Status.CredentialSecretVersion, "serviceAccount", cr.Status.ServiceAccount)
+ }
+ }
+
//update MC
//Retrieve monitoring console ref from CM Spec
cmMonitoringConsoleConfigRef, err := RetrieveCMSpec(ctx, client, cr)
if err != nil {
- eventPublisher.Warning(ctx, "RetrieveCMSpec", fmt.Sprintf("retrive cluster manager spec failed %s", err.Error()))
+ eventPublisher.Warning(ctx, "RetrieveCMSpec", fmt.Sprintf("retrieve cluster manager spec failed %s", err.Error()))
return result, err
}
if cmMonitoringConsoleConfigRef != "" {
@@ -261,7 +313,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
}
}
if len(cr.Spec.MonitoringConsoleRef.Name) > 0 && (cr.Spec.MonitoringConsoleRef.Name != cmMonitoringConsoleConfigRef) {
- scopedLog.Info("Indexer Cluster CR should not specify monitoringConsoleRef and if specified, should be similar to cluster manager spec")
+ logger.WarnContext(ctx, "Indexer Cluster CR should not specify monitoringConsoleRef and if specified, should be similar to cluster manager spec")
}
}
if len(cr.Status.IndexerSecretChanged) > 0 {
@@ -276,7 +328,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
// Disable maintenance mode
err = SetClusterMaintenanceMode(ctx, client, cr, false, cmPodName, podExecClient)
if err != nil {
- eventPublisher.Warning(ctx, "SetClusterMaintenanceMode", fmt.Sprintf("set cluster maintainance mode failed %s", err.Error()))
+ eventPublisher.Warning(ctx, "SetClusterMaintenanceMode", fmt.Sprintf("set cluster maintenance mode failed %s", err.Error()))
return result, err
}
}
@@ -288,7 +340,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
result.Requeue = false
// Set indexer cluster CR as owner reference for clustermanager
- scopedLog.Info("Setting indexer cluster as owner for cluster manager")
+ logger.InfoContext(ctx, "Setting indexer cluster as owner for cluster manager")
if len(cr.Spec.ClusterManagerRef.Name) > 0 {
namespacedName = types.NamespacedName{Namespace: cr.GetNamespace(), Name: GetSplunkStatefulsetName(SplunkClusterManager, cr.Spec.ClusterManagerRef.Name)}
}
@@ -315,20 +367,27 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient,
Requeue: true,
RequeueAfter: time.Second * 5,
}
- reqLogger := log.FromContext(ctx)
- scopedLog := reqLogger.WithName("ApplyIndexerCluster")
- eventPublisher, _ := newK8EventPublisher(client, cr)
+ logger := logging.FromContext(ctx).With("func", "ApplyIndexerCluster", "name", cr.GetName(), "namespace", cr.GetNamespace())
+
+ eventPublisher := GetEventPublisher(ctx, cr)
cr.Kind = "IndexerCluster"
// validate and updates defaults for CR
err := validateIndexerClusterSpec(ctx, client, cr)
if err != nil {
+ eventPublisher.Warning(ctx, "validateIndexerClusterSpec", fmt.Sprintf("validate indexercluster spec failed %s", err.Error()))
+ logger.ErrorContext(ctx, "Failed to validate indexercluster spec", "error", err.Error())
return result, err
}
// updates status after function completes
cr.Status.Phase = enterpriseApi.PhaseError
cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError
+ if cr.Status.Replicas < cr.Spec.Replicas {
+ logger.InfoContext(ctx, "Scaling up indexer cluster", "previousReplicas", cr.Status.Replicas, "newReplicas", cr.Spec.Replicas)
+ cr.Status.CredentialSecretVersion = "0"
+ cr.Status.ServiceAccount = ""
+ }
cr.Status.Replicas = cr.Spec.Replicas
cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName())
if cr.Status.Peers == nil {
@@ -347,7 +406,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient,
// create or update general config resources
namespaceScopedSecret, err := ApplySplunkConfig(ctx, client, cr, cr.Spec.CommonSplunkSpec, SplunkIndexer)
if err != nil {
- scopedLog.Error(err, "create or update general config failed", "error", err.Error())
+ logger.ErrorContext(ctx, "create or update general config failed", "error", err.Error())
eventPublisher.Warning(ctx, "ApplySplunkConfig", fmt.Sprintf("create or update general config failed with error %s", err.Error()))
return result, err
}
@@ -370,7 +429,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient,
cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError
}
- mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient)
+ mgr := newIndexerClusterPodManager(logger, cr, namespaceScopedSecret, splclient.NewSplunkClient, client)
// Check if we have configured enough number(<= RF) of replicas
if mgr.cr.Status.ClusterMasterPhase == enterpriseApi.PhaseReady {
err = VerifyRFPeers(ctx, mgr, client)
@@ -497,11 +556,56 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient,
// no need to requeue if everything is ready
if cr.Status.Phase == enterpriseApi.PhaseReady {
+ qosCfg, err := ResolveQueueAndObjectStorage(ctx, client, cr, cr.Spec.QueueRef, cr.Spec.ObjectStorageRef, cr.Spec.ServiceAccount)
+ if err != nil {
+ logger.ErrorContext(ctx, "Failed to resolve Queue/ObjectStorage config", "error", err.Error())
+ return result, err
+ }
+ logger.DebugContext(ctx, "Resolved Queue/ObjectStorage config", "queue", qosCfg.Queue, "objectStorage", qosCfg.OS, "version", qosCfg.Version, "serviceAccount", cr.Spec.ServiceAccount)
+
+ secretChanged := cr.Status.CredentialSecretVersion != qosCfg.Version
+ serviceAccountChanged := cr.Status.ServiceAccount != cr.Spec.ServiceAccount
+
+ logger.InfoContext(ctx, "Checking for changes", "previousCredentialSecretVersion", cr.Status.CredentialSecretVersion, "previousServiceAccount", cr.Status.ServiceAccount, "secretChanged", secretChanged, "serviceAccountChanged", serviceAccountChanged)
+
+ if cr.Spec.QueueRef.Name != "" {
+ if secretChanged || serviceAccountChanged {
+ mgr := newIndexerClusterPodManager(logger, cr, namespaceScopedSecret, splclient.NewSplunkClient, client)
+ err = mgr.updateIndexerConfFiles(ctx, cr, &qosCfg.Queue, &qosCfg.OS, qosCfg.AccessKey, qosCfg.SecretKey, client)
+ if err != nil {
+ eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error()))
+ logger.ErrorContext(ctx, "Failed to update conf file for Queue/Pipeline config change after pod creation", "error", err.Error())
+ return result, err
+ }
+
+ eventPublisher.Normal(ctx, "QueueConfigUpdated",
+ fmt.Sprintf("Queue/Pipeline configuration updated for %d indexers", cr.Spec.Replicas))
+ logger.InfoContext(ctx, "Queue/Pipeline configuration updated", "readyReplicas", cr.Status.ReadyReplicas)
+
+ for i := int32(0); i < cr.Spec.Replicas; i++ {
+ idxcClient := mgr.getClient(ctx, i)
+ err = idxcClient.RestartSplunk()
+ if err != nil {
+ return result, err
+ }
+ logger.DebugContext(ctx, "Restarted splunk", "indexer", i)
+ }
+
+ eventPublisher.Normal(ctx, "IndexersRestarted",
+ fmt.Sprintf("Restarted Splunk on %d indexer pods", cr.Spec.Replicas))
+
+ cr.Status.CredentialSecretVersion = qosCfg.Version
+ cr.Status.ServiceAccount = cr.Spec.ServiceAccount
+
+ logger.InfoContext(ctx, "Updated status", "credentialSecretVersion", cr.Status.CredentialSecretVersion, "serviceAccount", cr.Status.ServiceAccount)
+ }
+ }
+
//update MC
//Retrieve monitoring console ref from CM Spec
cmMonitoringConsoleConfigRef, err := RetrieveCMSpec(ctx, client, cr)
if err != nil {
- eventPublisher.Warning(ctx, "RetrieveCMSpec", fmt.Sprintf("retrive cluster master spec failed %s", err.Error()))
+ eventPublisher.Warning(ctx, "RetrieveCMSpec", fmt.Sprintf("retrieve cluster master spec failed %s", err.Error()))
return result, err
}
if cmMonitoringConsoleConfigRef != "" {
@@ -517,7 +621,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient,
}
}
if len(cr.Spec.MonitoringConsoleRef.Name) > 0 && (cr.Spec.MonitoringConsoleRef.Name != cmMonitoringConsoleConfigRef) {
- scopedLog.Info("Indexer Cluster CR should not specify monitoringConsoleRef and if specified, should be similar to cluster master spec")
+ logger.WarnContext(ctx, "Indexer Cluster CR should not specify monitoringConsoleRef and if specified, should be similar to cluster master spec")
}
}
if len(cr.Status.IndexerSecretChanged) > 0 {
@@ -532,7 +636,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient,
// Disable maintenance mode
err = SetClusterMaintenanceMode(ctx, client, cr, false, cmPodName, podExecClient)
if err != nil {
- eventPublisher.Warning(ctx, "SetClusterMaintenanceMode", fmt.Sprintf("set cluster maintainance mode failed %s", err.Error()))
+ eventPublisher.Warning(ctx, "SetClusterMaintenanceMode", fmt.Sprintf("set cluster maintenance mode failed %s", err.Error()))
return result, err
}
}
@@ -544,7 +648,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient,
result.Requeue = false
// Set indexer cluster CR as owner reference for clustermaster
- scopedLog.Info("Setting indexer cluster as owner for cluster master")
+ logger.InfoContext(ctx, "Setting indexer cluster as owner for cluster master")
namespacedName = types.NamespacedName{Namespace: cr.GetNamespace(), Name: GetSplunkStatefulsetName(SplunkClusterMaster, cr.Spec.ClusterMasterRef.Name)}
err = splctrl.SetStatefulSetOwnerRef(ctx, client, cr, namespacedName)
if err != nil {
@@ -569,19 +673,20 @@ var VerifyRFPeers = func(ctx context.Context, mgr indexerClusterPodManager, clie
// indexerClusterPodManager is used to manage the pods within an indexer cluster
type indexerClusterPodManager struct {
c splcommon.ControllerClient
- log logr.Logger
+ log *slog.Logger
cr *enterpriseApi.IndexerCluster
secrets *corev1.Secret
newSplunkClient func(managementURI, username, password string) *splclient.SplunkClient
}
// newIndexerClusterPodManager function to create pod manager this is added to write unit test case
-var newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc) indexerClusterPodManager {
+var newIndexerClusterPodManager = func(log *slog.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) indexerClusterPodManager {
return indexerClusterPodManager{
log: log,
cr: cr,
secrets: secret,
newSplunkClient: newSplunkClient,
+ c: c,
}
}
@@ -625,6 +730,10 @@ func SetClusterMaintenanceMode(ctx context.Context, c splcommon.ControllerClient
// ApplyIdxcSecret checks if any of the indexer's have a different idxc_secret from namespace scoped secret and changes it
func ApplyIdxcSecret(ctx context.Context, mgr *indexerClusterPodManager, replicas int32, podExecClient splutil.PodExecClientImpl) error {
var indIdxcSecret string
+
+ // Get event publisher from context
+ eventPublisher := GetEventPublisher(ctx, mgr.cr)
+
// Get namespace scoped secret
namespaceSecret, err := splutil.ApplyNamespaceScopedSecretObject(ctx, mgr.c, mgr.cr.GetNamespace())
if err != nil {
@@ -651,6 +760,7 @@ func ApplyIdxcSecret(ctx context.Context, mgr *indexerClusterPodManager, replica
nsIdxcSecret := string(namespaceSecret.Data[splcommon.IdxcSecret])
// Loop over all indexer pods and get individual pod's idxc password
+ howManyPodsHaveSecretChanged := 0
for i := int32(0); i <= replicas-1; i++ {
// Get Indexer's name
indexerPodName := GetSplunkStatefulsetPodName(SplunkIndexer, mgr.cr.GetName(), i)
@@ -716,13 +826,25 @@ func ApplyIdxcSecret(ctx context.Context, mgr *indexerClusterPodManager, replica
// Change idxc secret key
err = idxcClient.SetIdxcSecret(nsIdxcSecret)
if err != nil {
+ // Emit event for password sync failure
+ if eventPublisher != nil {
+ eventPublisher.Warning(ctx, "PasswordSyncFailed",
+ fmt.Sprintf("Password sync failed for pod '%s': %s. Check pod logs and secret format.", indexerPodName, err.Error()))
+ }
return err
}
scopedLog.Info("Changed idxc secret")
+ howManyPodsHaveSecretChanged += 1
+
// Restart splunk instance on pod
err = idxcClient.RestartSplunk()
if err != nil {
+ // Emit event for password sync failure
+ if eventPublisher != nil {
+ eventPublisher.Warning(ctx, "PasswordSyncFailed",
+ fmt.Sprintf("Password sync failed for pod '%s': %s. Check pod logs and secret format.", indexerPodName, err.Error()))
+ }
return err
}
scopedLog.Info("Restarted splunk")
@@ -776,6 +898,12 @@ func ApplyIdxcSecret(ctx context.Context, mgr *indexerClusterPodManager, replica
}
}
+ // Emit event for password sync completed
+ if eventPublisher != nil {
+ eventPublisher.Normal(ctx, "PasswordSyncCompleted",
+ fmt.Sprintf("Password synchronized for %d pods", howManyPodsHaveSecretChanged))
+ }
+
return nil
}
@@ -784,6 +912,12 @@ func (mgr *indexerClusterPodManager) Update(ctx context.Context, c splcommon.Con
var err error
+ // Get event publisher from context
+ eventPublisher := GetEventPublisher(ctx, mgr.cr)
+
+ // Track previous ready replicas for scaling events
+ previousReadyReplicas := mgr.cr.Status.ReadyReplicas
+
// Assign client
if mgr.c == nil {
mgr.c = c
@@ -795,7 +929,7 @@ func (mgr *indexerClusterPodManager) Update(ctx context.Context, c splcommon.Con
return enterpriseApi.PhaseError, err
}
} else {
- mgr.log.Info("Cluster Manager is not ready yet", "reason ", err)
+ mgr.log.InfoContext(ctx, "Cluster Manager is not ready yet", "error", err)
return enterpriseApi.PhaseError, err
}
@@ -816,7 +950,29 @@ func (mgr *indexerClusterPodManager) Update(ctx context.Context, c splcommon.Con
}
// manage scaling and updates
- return splctrl.UpdateStatefulSetPods(ctx, c, statefulSet, mgr, desiredReplicas)
+ phase, err := splctrl.UpdateStatefulSetPods(ctx, c, statefulSet, mgr, desiredReplicas)
+ if err != nil {
+ return phase, err
+ }
+
+ // Emit scale events when phase is ready and ready replicas changed to match desired
+ if phase == enterpriseApi.PhaseReady {
+ if mgr.cr.Status.ReadyReplicas == desiredReplicas && previousReadyReplicas != desiredReplicas {
+ if desiredReplicas > previousReadyReplicas {
+ if eventPublisher != nil {
+ eventPublisher.Normal(ctx, "ScaledUp",
+ fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", mgr.cr.GetName(), previousReadyReplicas, desiredReplicas))
+ }
+ } else if desiredReplicas < previousReadyReplicas {
+ if eventPublisher != nil {
+ eventPublisher.Normal(ctx, "ScaledDown",
+ fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", mgr.cr.GetName(), previousReadyReplicas, desiredReplicas))
+ }
+ }
+ }
+ }
+
+ return phase, nil
}
// PrepareScaleDown for indexerClusterPodManager prepares indexer pod to be removed via scale down event; it returns true when ready
@@ -962,6 +1118,9 @@ func getSiteRepFactorOriginCount(siteRepFactor string) int32 {
// verifyRFPeers verifies the number of peers specified in the replicas section
// of IndexerClsuster CR. If it is less than RF, than we set it to RF.
func (mgr *indexerClusterPodManager) verifyRFPeers(ctx context.Context, c splcommon.ControllerClient) error {
+ // Get event publisher from context
+ eventPublisher := GetEventPublisher(ctx, mgr.cr)
+
if mgr.c == nil {
mgr.c = c
}
@@ -978,8 +1137,14 @@ func (mgr *indexerClusterPodManager) verifyRFPeers(ctx context.Context, c splcom
replicationFactor = clusterInfo.ReplicationFactor
}
- if mgr.cr.Spec.Replicas < replicationFactor {
- mgr.log.Info("Changing number of replicas as it is less than RF number of peers", "replicas", mgr.cr.Spec.Replicas)
+ requestedReplicas := mgr.cr.Spec.Replicas
+ if requestedReplicas < replicationFactor {
+ mgr.log.Info("Changing number of replicas as it is less than RF number of peers", "replicas", requestedReplicas)
+ // Emit event indicating scaling below RF is blocked/adjusted
+ if eventPublisher != nil {
+ eventPublisher.Warning(ctx, "ScalingBlockedRF",
+ fmt.Sprintf("Cannot scale below replication factor: %d replicas required, %d requested. Adjust replicationFactor or replicas.", replicationFactor, requestedReplicas))
+ }
mgr.cr.Spec.Replicas = replicationFactor
}
return nil
@@ -1007,6 +1172,9 @@ func (mgr *indexerClusterPodManager) updateStatus(ctx context.Context, statefulS
return fmt.Errorf("waiting for cluster manager to become ready")
}
+ oldInitialized := mgr.cr.Status.Initialized
+ oldIndexingReady := mgr.cr.Status.IndexingReady
+
// get indexer cluster info from cluster manager if it's ready
clusterInfo, err := GetClusterManagerInfoCall(ctx, mgr)
if err != nil {
@@ -1047,6 +1215,39 @@ func (mgr *indexerClusterPodManager) updateStatus(ctx context.Context, statefulS
mgr.cr.Status.Peers = mgr.cr.Status.Peers[:statefulSet.Status.Replicas]
}
+ // Get event publisher from context
+ eventPublisher := GetEventPublisher(ctx, mgr.cr)
+
+ // Emit events only on state transitions
+ if eventPublisher != nil {
+ // Compute current available peers for quorum-related events
+ var available int32
+ totalPeers := len(mgr.cr.Status.Peers)
+ for _, p := range mgr.cr.Status.Peers {
+ if p.Status == "Up" && p.Searchable {
+ available++
+ }
+ }
+
+ // Cluster just finished initializing when quorum becomes ready
+ if !oldIndexingReady && mgr.cr.Status.IndexingReady {
+ if !oldInitialized && mgr.cr.Status.Initialized {
+ eventPublisher.Normal(ctx, "ClusterInitialized",
+ fmt.Sprintf("Cluster '%s' initialized with %d peers", mgr.cr.GetName(), totalPeers))
+ }
+
+ // Cluster quorum just restored
+ eventPublisher.Normal(ctx, "ClusterQuorumRestored",
+ fmt.Sprintf("Cluster quorum restored: %d/%d peers available", available, totalPeers))
+ }
+
+ // Cluster quorum lost (transition out of indexing ready)
+ if oldIndexingReady && !mgr.cr.Status.IndexingReady {
+ eventPublisher.Warning(ctx, "ClusterQuorumLost",
+ fmt.Sprintf("Cluster quorum lost: %d/%d peers available. Investigate peer failures immediately.", available, totalPeers))
+ }
+ }
+
return nil
}
@@ -1078,11 +1279,12 @@ func validateIndexerClusterSpec(ctx context.Context, c splcommon.ControllerClien
len(cr.Spec.ClusterMasterRef.Namespace) > 0 && cr.Spec.ClusterMasterRef.Namespace != cr.GetNamespace() {
return fmt.Errorf("multisite cluster does not support cluster manager to be located in a different namespace")
}
+
return validateCommonSplunkSpec(ctx, c, &cr.Spec.CommonSplunkSpec, cr)
}
// helper function to get the list of IndexerCluster types in the current namespace
-func getIndexerClusterList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) (enterpriseApi.IndexerClusterList, error) {
+func getIndexerClusterList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []rclient.ListOption) (enterpriseApi.IndexerClusterList, error) {
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("getIndexerClusterList").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
@@ -1099,23 +1301,19 @@ func getIndexerClusterList(ctx context.Context, c splcommon.ControllerClient, cr
// RetrieveCMSpec finds monitoringConsole ref from cm spec
func RetrieveCMSpec(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster) (string, error) {
- var monitoringConsoleRef string = ""
-
if len(cr.Spec.ClusterMasterRef.Name) > 0 && len(cr.Spec.ClusterManagerRef.Name) == 0 {
namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: cr.Spec.ClusterMasterRef.Name}
var cmCR enterpriseApiV3.ClusterMaster
err := client.Get(ctx, namespacedName, &cmCR)
if err == nil {
- monitoringConsoleRef = cmCR.Spec.MonitoringConsoleRef.Name
- return monitoringConsoleRef, err
+ return cmCR.Spec.MonitoringConsoleRef.Name, nil
}
} else if len(cr.Spec.ClusterManagerRef.Name) > 0 && len(cr.Spec.ClusterMasterRef.Name) == 0 {
namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: cr.Spec.ClusterManagerRef.Name}
var cmCR enterpriseApi.ClusterManager
err := client.Get(ctx, namespacedName, &cmCR)
if err == nil {
- monitoringConsoleRef = cmCR.Spec.MonitoringConsoleRef.Name
- return monitoringConsoleRef, err
+ return cmCR.Spec.MonitoringConsoleRef.Name, nil
}
}
@@ -1159,6 +1357,68 @@ func getSiteName(ctx context.Context, c splcommon.ControllerClient, cr *enterpri
return extractedValue
}
+var newSplunkClientForQueuePipeline = splclient.NewSplunkClient
+
+// updateIndexerConfFiles checks if Queue or Pipeline inputs are created for the first time and updates the conf file if so
+func (mgr *indexerClusterPodManager) updateIndexerConfFiles(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string, k8s rclient.Client) error {
+ logger := logging.FromContext(ctx).With("func", "updateIndexerConfFiles", "name", newCR.GetName(), "namespace", newCR.GetNamespace())
+
+ // Only update config for pods that exist
+ readyReplicas := newCR.Status.ReadyReplicas
+
+ // List all pods for this IndexerCluster StatefulSet
+ var updateErr error
+ for n := 0; n < int(readyReplicas); n++ {
+ memberName := GetSplunkStatefulsetPodName(SplunkIndexer, newCR.GetName(), int32(n))
+ fqdnName := splcommon.GetServiceFQDN(newCR.GetNamespace(), fmt.Sprintf("%s.%s", memberName, GetSplunkServiceName(SplunkIndexer, newCR.GetName(), true)))
+ adminPwd, err := splutil.GetSpecificSecretTokenFromPod(ctx, k8s, memberName, newCR.GetNamespace(), "password")
+ if err != nil {
+ return err
+ }
+ splunkClient := newSplunkClientForQueuePipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd))
+
+ queueInputs, queueOutputs, pipelineInputs := getQueueAndPipelineInputsForIndexerConfFiles(queue, os, accessKey, secretKey)
+
+ for _, pbVal := range queueOutputs {
+ if !strings.Contains(pbVal[0], "access_key") && !strings.Contains(pbVal[0], "secret_key") {
+ logger.InfoContext(ctx, "Updating queue input in outputs.conf", "input", pbVal)
+ }
+ if err := splunkClient.UpdateConfFile(ctx, logger, "outputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{pbVal}); err != nil {
+ updateErr = err
+ }
+ }
+
+ for _, pbVal := range queueInputs {
+ if !strings.Contains(pbVal[0], "access_key") && !strings.Contains(pbVal[0], "secret_key") {
+ logger.InfoContext(ctx, "Updating queue input in inputs.conf", "input", pbVal)
+ }
+ if err := splunkClient.UpdateConfFile(ctx, logger, "inputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{pbVal}); err != nil {
+ updateErr = err
+ }
+ }
+
+ for _, field := range pipelineInputs {
+ logger.InfoContext(ctx, "Updating pipeline input in default-mode.conf", "input", field)
+ if err := splunkClient.UpdateConfFile(ctx, logger, "default-mode", field[0], [][]string{{field[1], field[2]}}); err != nil {
+ updateErr = err
+ }
+ }
+ }
+
+ return updateErr
+}
+
+// getQueueAndPipelineInputsForIndexerConfFiles returns a list of queue and pipeline inputs for indexer pods conf files
+func getQueueAndPipelineInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (queueInputs, queueOutputs, pipelineInputs [][]string) {
+ // Queue Inputs
+ queueInputs, queueOutputs = getQueueAndObjectStorageInputsForIndexerConfFiles(queue, os, accessKey, secretKey)
+
+ // Pipeline inputs
+ pipelineInputs = getPipelineInputsForConfFile(true)
+
+ return
+}
+
// Tells if there is an image migration from 8.x.x to 9.x.x
func imageUpdatedTo9(previousImage string, currentImage string) bool {
// If there is no colon, version can't be detected
@@ -1169,3 +1429,62 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool {
currentVersion := strings.Split(currentImage, ":")[1]
return strings.HasPrefix(previousVersion, "8") && strings.HasPrefix(currentVersion, "9")
}
+
+// getQueueAndObjectStorageInputsForIndexerConfFiles returns a list of queue and object storage inputs for conf files.
+// inputs holds the remote_queue settings common to inputs.conf; outputs extends
+// the same settings with send_interval and encoding_format for outputs.conf.
+// Credentials are appended only when both accessKey and secretKey are non-empty.
+func getQueueAndObjectStorageInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (inputs, outputs [][]string) {
+	// Map the CR provider names onto the Splunk remote_queue scheme names.
+	// NOTE(review): for any provider other than "sqs"/"sqs_cp", queueProvider
+	// stays "" and the fmt.Sprintf keys below become "remote_queue..<field>" —
+	// presumably an upstream webhook/validator rejects other providers; TODO confirm.
+	queueProvider := ""
+	authRegion := ""
+	endpoint := ""
+	dlq := ""
+	if queue.Provider == "sqs" {
+		queueProvider = "sqs_smartbus"
+	} else if queue.Provider == "sqs_cp" {
+		queueProvider = "sqs_smartbus_cp"
+	}
+	if queue.Provider == "sqs" || queue.Provider == "sqs_cp" {
+		authRegion = queue.SQS.AuthRegion
+		endpoint = queue.SQS.Endpoint
+		dlq = queue.SQS.DLQ
+	}
+
+	// Large-message store (object storage) settings; the scheme name mirrors
+	// the queue provider. Path is normalized to an s3:// URL.
+	// NOTE(review): osProvider is also "" when os.Provider != "s3", with the
+	// same malformed-key consequence as above — TODO confirm validation.
+	path := ""
+	osEndpoint := ""
+	osProvider := ""
+	if os.Provider == "s3" {
+		if queueProvider == "sqs_smartbus" {
+			osProvider = "sqs_smartbus"
+		} else if queueProvider == "sqs_smartbus_cp" {
+			osProvider = "sqs_smartbus_cp"
+		}
+		osEndpoint = os.S3.Endpoint
+		path = os.S3.Path
+		if !strings.HasPrefix(path, "s3://") {
+			path = "s3://" + path
+		}
+	}
+
+	inputs = append(inputs,
+		[]string{"remote_queue.type", queueProvider},
+		[]string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), authRegion},
+		[]string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), endpoint},
+		[]string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), osEndpoint},
+		[]string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), path},
+		[]string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), dlq},
+		[]string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"},
+		[]string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"},
+	)
+
+	// TODO: Handle credentials change
+	if accessKey != "" && secretKey != "" {
+		inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), accessKey})
+		inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), secretKey})
+	}
+
+	// NOTE(review): outputs aliases inputs' backing array here. The append
+	// below usually reallocates (capacity is typically exhausted), but if
+	// spare capacity exists the two slices share storage and a later append
+	// to inputs could clobber outputs — consider an explicit copy; TODO confirm.
+	outputs = inputs
+	outputs = append(outputs,
+		[]string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"},
+		[]string{fmt.Sprintf("remote_queue.%s.encoding_format", queueProvider), "s2s"},
+	)
+
+	return inputs, outputs
+}
diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go
index 92f562c5a..9ad921930 100644
--- a/pkg/splunk/enterprise/indexercluster_test.go
+++ b/pkg/splunk/enterprise/indexercluster_test.go
@@ -19,6 +19,7 @@ import (
"context"
"encoding/json"
"fmt"
+ "log/slog"
"net/http"
"os"
"path/filepath"
@@ -30,10 +31,12 @@ import (
"github.com/pkg/errors"
enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ "github.com/stretchr/testify/assert"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
pkgruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
@@ -42,7 +45,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- "github.com/go-logr/logr"
+ "github.com/splunk/splunk-operator/pkg/logging"
splclient "github.com/splunk/splunk-operator/pkg/splunk/client"
splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
spltest "github.com/splunk/splunk-operator/pkg/splunk/test"
@@ -305,7 +308,7 @@ func TestGetMonitoringConsoleClient(t *testing.T) {
},
},
}
- scopedLog := logt.WithName("TestGetMonitoringConsoleClient")
+ scopedLog := logging.FromContext(context.Background()).With("func", "TestGetMonitoringConsoleClient", "name", "stack1", "namespace", "test")
secrets := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
@@ -334,7 +337,7 @@ func TestGetClusterManagerClient(t *testing.T) {
os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
ctx := context.TODO()
- scopedLog := logt.WithName("TestGetClusterManagerClient")
+ scopedLog := logging.FromContext(ctx).With("func", "TestGetClusterManagerClient", "name", "stack1", "namespace", "test")
cr := enterpriseApi.IndexerCluster{
TypeMeta: metav1.TypeMeta{
Kind: "IndexerCluster",
@@ -385,7 +388,7 @@ func TestGetClusterManagerClient(t *testing.T) {
func getIndexerClusterPodManager(method string, mockHandlers []spltest.MockHTTPHandler, mockSplunkClient *spltest.MockHTTPClient, replicas int32) *indexerClusterPodManager {
os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
- scopedLog := logt.WithName(method)
+ scopedLog := logging.FromContext(context.Background()).With("func", method, "name", "stack1", "namespace", "test")
cr := enterpriseApi.IndexerCluster{
TypeMeta: metav1.TypeMeta{
Kind: "IndexerCluster",
@@ -1027,7 +1030,7 @@ func TestSetClusterMaintenanceMode(t *testing.T) {
func TestApplyIdxcSecret(t *testing.T) {
os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
method := "ApplyIdxcSecret"
- scopedLog := logt.WithName(method)
+ scopedLog := logging.FromContext(context.Background()).With("func", method, "name", "stack1", "namespace", "test")
var initObjectList []client.Object
ctx := context.TODO()
@@ -1344,11 +1347,36 @@ func TestInvalidIndexerClusterSpec(t *testing.T) {
func TestGetIndexerStatefulSet(t *testing.T) {
os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ queue := enterpriseApi.Queue{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Queue",
+ APIVersion: "enterprise.splunk.com/v4",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "queue",
+ },
+ Spec: enterpriseApi.QueueSpec{
+ Provider: "sqs",
+ SQS: enterpriseApi.SQSSpec{
+ Name: "test-queue",
+ AuthRegion: "us-west-2",
+ Endpoint: "https://sqs.us-west-2.amazonaws.com",
+ DLQ: "sqs-dlq-test",
+ },
+ },
+ }
+
cr := enterpriseApi.IndexerCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "stack1",
Namespace: "test",
},
+ Spec: enterpriseApi.IndexerClusterSpec{
+ QueueRef: corev1.ObjectReference{
+ Name: queue.Name,
+ },
+ },
}
ctx := context.TODO()
@@ -1533,41 +1561,93 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
},
}
+ // Mock cluster config endpoint for VerifyRFPeers
+ type ClusterInfoEntry struct {
+ Content splclient.ClusterInfo `json:"content"`
+ }
+ clusterInfoResponse := struct {
+ Entry []ClusterInfoEntry `json:"entry"`
+ }{
+ Entry: []ClusterInfoEntry{
+ {
+ Content: splclient.ClusterInfo{
+ MultiSite: "false",
+ ReplicationFactor: 3,
+ SiteReplicationFactor: "",
+ },
+ },
+ },
+ }
+ response3, _ := json.Marshal(clusterInfoResponse)
+
response1, _ := json.Marshal(apiResponse1)
response2, _ := json.Marshal(apiResponse2)
wantRequest1, _ := http.NewRequest("GET", "https://splunk-test-cluster-manager-service.default.svc.cluster.local:8089/services/cluster/manager/info?count=0&output_mode=json", nil)
wantRequest2, _ := http.NewRequest("GET", "https://splunk-test-cluster-manager-service.default.svc.cluster.local:8089/services/cluster/manager/peers?count=0&output_mode=json", nil)
+ wantRequest3, _ := http.NewRequest("GET", "https://splunk-test-cluster-manager-service.default.svc.cluster.local:8089/services/cluster/config?count=0&output_mode=json", nil)
mclient.AddHandler(wantRequest1, 200, string(response1), nil)
mclient.AddHandler(wantRequest2, 200, string(response2), nil)
+ mclient.AddHandler(wantRequest3, 200, string(response3), nil)
- // mock the verify RF peer function
- VerifyRFPeers = func(ctx context.Context, mgr indexerClusterPodManager, client splcommon.ControllerClient) error {
- return nil
+ // Mock GetSpecificSecretTokenFromPod to return a dummy password
+ // This allows VerifyRFPeers to execute its real logic with HTTP calls mocked via MockHTTPClient
+ savedGetSpecificSecretTokenFromPod := splutil.GetSpecificSecretTokenFromPod
+ defer func() { splutil.GetSpecificSecretTokenFromPod = savedGetSpecificSecretTokenFromPod }()
+ splutil.GetSpecificSecretTokenFromPod = func(ctx context.Context, c splcommon.ControllerClient, podName string, namespace string, secretToken string) (string, error) {
+ return "dummypassword", nil
}
- newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc) indexerClusterPodManager {
+ savedNewIndexerClusterPodManager := newIndexerClusterPodManager
+ defer func() { newIndexerClusterPodManager = savedNewIndexerClusterPodManager }()
+ newIndexerClusterPodManager = func(log *slog.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) indexerClusterPodManager {
return indexerClusterPodManager{
log: log,
cr: cr,
secrets: secret,
newSplunkClient: func(managementURI, username, password string) *splclient.SplunkClient {
- c := splclient.NewSplunkClient(managementURI, username, password)
- c.Client = mclient
- return c
+ sc := splclient.NewSplunkClient(managementURI, username, password)
+ sc.Client = mclient
+ return sc
},
+ c: c,
}
}
+ // Initialize GlobalResourceTracker to enable app framework
+ initGlobalResourceTracker()
+
// create directory for app framework
newpath := filepath.Join("/tmp", "appframework")
_ = os.MkdirAll(newpath, os.ModePerm)
// adding getapplist to fix test case
+ savedGetAppsList := GetAppsList
+ defer func() { GetAppsList = savedGetAppsList }()
GetAppsList = func(ctx context.Context, remoteDataClientMgr RemoteDataClientManager) (splclient.RemoteDataListResponse, error) {
RemoteDataListResponse := splclient.RemoteDataListResponse{}
return RemoteDataListResponse, nil
}
+ // Mock GetPodExecClient to return a mock client that simulates pod operations locally
+ savedGetPodExecClient := splutil.GetPodExecClient
+ splutil.GetPodExecClient = func(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) splutil.PodExecClientImpl {
+ mockClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: cr,
+ TargetPodName: targetPodName,
+ }
+ // Add mock responses for common commands
+ ctx := context.TODO()
+ // Mock mkdir command (used by createDirOnSplunkPods)
+ mockClient.AddMockPodExecReturnContext(ctx, "mkdir -p", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "",
+ Err: nil,
+ })
+ return mockClient
+ }
+ defer func() { splutil.GetPodExecClient = savedGetPodExecClient }()
+
sch := pkgruntime.NewScheme()
utilruntime.Must(clientgoscheme.AddToScheme(sch))
utilruntime.Must(corev1.AddToScheme(sch))
@@ -1707,7 +1787,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
}
// call reconciliation
- _, err = ApplyClusterManager(ctx, c, clustermanager)
+ _, err = ApplyClusterManager(ctx, c, clustermanager, nil)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
debug.PrintStack()
@@ -1786,7 +1866,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
}
// call reconciliation
- _, err = ApplyClusterManager(ctx, c, clustermanager)
+ _, err = ApplyClusterManager(ctx, c, clustermanager, nil)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
debug.PrintStack()
@@ -2020,3 +2100,1400 @@ func TestImageUpdatedTo9(t *testing.T) {
t.Errorf("Should not have detected an upgrade from 8 to 9, there is no version")
}
}
+
+// TestGetQueueAndPipelineInputsForIndexerConfFiles verifies the exact key/value
+// lists generated for the "sqs" provider: 10 inputs.conf entries (including
+// credentials), 12 outputs.conf entries (inputs plus send_interval and
+// encoding_format), and the 5 default-mode.conf pipeline triples.
+func TestGetQueueAndPipelineInputsForIndexerConfFiles(t *testing.T) {
+	provider := "sqs_smartbus"
+
+	queue := &enterpriseApi.Queue{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Queue",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "queue",
+		},
+		Spec: enterpriseApi.QueueSpec{
+			Provider: "sqs",
+			SQS: enterpriseApi.SQSSpec{
+				Name:       "test-queue",
+				AuthRegion: "us-west-2",
+				Endpoint:   "https://sqs.us-west-2.amazonaws.com",
+				DLQ:        "sqs-dlq-test",
+				VolList: []enterpriseApi.VolumeSpec{
+					{SecretRef: "secret"},
+				},
+			},
+		},
+	}
+
+	// NOTE(review): this local "os" shadows the stdlib os package below this point.
+	os := &enterpriseApi.ObjectStorage{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ObjectStorage",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "os",
+		},
+		Spec: enterpriseApi.ObjectStorageSpec{
+			Provider: "s3",
+			S3: enterpriseApi.S3Spec{
+				Endpoint: "https://s3.us-west-2.amazonaws.com",
+				Path:     "bucket/key",
+			},
+		},
+	}
+
+	key := "key"
+	secret := "secret"
+
+	queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getQueueAndPipelineInputsForIndexerConfFiles(&queue.Spec, &os.Spec, key, secret)
+	assert.Equal(t, 10, len(queueChangedFieldsInputs))
+	assert.Equal(t, [][]string{
+		{"remote_queue.type", provider},
+		{fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion},
+		{fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), "s3://" + os.Spec.S3.Path},
+		{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ},
+		{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"},
+		{fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"},
+		{fmt.Sprintf("remote_queue.%s.access_key", provider), key},
+		{fmt.Sprintf("remote_queue.%s.secret_key", provider), secret},
+	}, queueChangedFieldsInputs)
+
+	// outputs.conf entries: the inputs list plus send_interval and encoding_format.
+	assert.Equal(t, 12, len(queueChangedFieldsOutputs))
+	assert.Equal(t, [][]string{
+		{"remote_queue.type", provider},
+		{fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion},
+		{fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), "s3://" + os.Spec.S3.Path},
+		{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ},
+		{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"},
+		{fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"},
+		{fmt.Sprintf("remote_queue.%s.access_key", provider), key},
+		{fmt.Sprintf("remote_queue.%s.secret_key", provider), secret},
+		{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"},
+		{fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"},
+	}, queueChangedFieldsOutputs)
+
+	// default-mode.conf pipeline enable/disable triples.
+	assert.Equal(t, 5, len(pipelineChangedFields))
+	assert.Equal(t, [][]string{
+		{"pipeline:remotequeueruleset", "disabled", "false"},
+		{"pipeline:ruleset", "disabled", "true"},
+		{"pipeline:remotequeuetyping", "disabled", "false"},
+		{"pipeline:remotequeueoutput", "disabled", "false"},
+		{"pipeline:typing", "disabled", "true"},
+	}, pipelineChangedFields)
+}
+
+// TestGetQueueAndPipelineInputsForIndexerConfFilesSQSCP mirrors the "sqs" test
+// for the "sqs_cp" provider, asserting the same list shapes with the
+// "sqs_smartbus_cp" scheme name in every generated key.
+func TestGetQueueAndPipelineInputsForIndexerConfFilesSQSCP(t *testing.T) {
+	provider := "sqs_smartbus_cp"
+
+	queue := &enterpriseApi.Queue{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Queue",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "queue",
+		},
+		Spec: enterpriseApi.QueueSpec{
+			Provider: "sqs_cp",
+			SQS: enterpriseApi.SQSSpec{
+				Name:       "test-queue",
+				AuthRegion: "us-west-2",
+				Endpoint:   "https://sqs.us-west-2.amazonaws.com",
+				DLQ:        "sqs-dlq-test",
+				VolList: []enterpriseApi.VolumeSpec{
+					{SecretRef: "secret"},
+				},
+			},
+		},
+	}
+
+	// NOTE(review): this local "os" shadows the stdlib os package below this point.
+	os := &enterpriseApi.ObjectStorage{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ObjectStorage",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "os",
+		},
+		Spec: enterpriseApi.ObjectStorageSpec{
+			Provider: "s3",
+			S3: enterpriseApi.S3Spec{
+				Endpoint: "https://s3.us-west-2.amazonaws.com",
+				Path:     "bucket/key",
+			},
+		},
+	}
+
+	key := "key"
+	secret := "secret"
+
+	queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getQueueAndPipelineInputsForIndexerConfFiles(&queue.Spec, &os.Spec, key, secret)
+	assert.Equal(t, 10, len(queueChangedFieldsInputs))
+	assert.Equal(t, [][]string{
+		{"remote_queue.type", provider},
+		{fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion},
+		{fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), "s3://" + os.Spec.S3.Path},
+		{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ},
+		{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"},
+		{fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"},
+		{fmt.Sprintf("remote_queue.%s.access_key", provider), key},
+		{fmt.Sprintf("remote_queue.%s.secret_key", provider), secret},
+	}, queueChangedFieldsInputs)
+
+	// outputs.conf entries: the inputs list plus send_interval and encoding_format.
+	assert.Equal(t, 12, len(queueChangedFieldsOutputs))
+	assert.Equal(t, [][]string{
+		{"remote_queue.type", provider},
+		{fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion},
+		{fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), "s3://" + os.Spec.S3.Path},
+		{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ},
+		{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"},
+		{fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"},
+		{fmt.Sprintf("remote_queue.%s.access_key", provider), key},
+		{fmt.Sprintf("remote_queue.%s.secret_key", provider), secret},
+		{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"},
+		{fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"},
+	}, queueChangedFieldsOutputs)
+
+	// default-mode.conf pipeline enable/disable triples.
+	assert.Equal(t, 5, len(pipelineChangedFields))
+	assert.Equal(t, [][]string{
+		{"pipeline:remotequeueruleset", "disabled", "false"},
+		{"pipeline:ruleset", "disabled", "true"},
+		{"pipeline:remotequeuetyping", "disabled", "false"},
+		{"pipeline:remotequeueoutput", "disabled", "false"},
+		{"pipeline:typing", "disabled", "true"},
+	}, pipelineChangedFields)
+}
+
+// TestUpdateIndexerConfFiles drives updateIndexerConfFiles through a sequence
+// of progressively-mocked states: missing secret (error), no HTTP handlers
+// (error), outputs.conf handlers only (error), outputs+inputs handlers (error),
+// and finally all three conf endpoints mocked (success).
+func TestUpdateIndexerConfFiles(t *testing.T) {
+	c := spltest.NewMockClient()
+	ctx := context.TODO()
+
+	// Object definitions
+	provider := "sqs_smartbus"
+
+	accessKey := "accessKey"
+	secretKey := "secretKey"
+
+	queue := &enterpriseApi.Queue{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Queue",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "queue",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.QueueSpec{
+			Provider: "sqs",
+			SQS: enterpriseApi.SQSSpec{
+				Name:       "test-queue",
+				AuthRegion: "us-west-2",
+				Endpoint:   "https://sqs.us-west-2.amazonaws.com",
+				DLQ:        "sqs-dlq-test",
+			},
+		},
+	}
+	c.Create(ctx, queue)
+
+	// NOTE(review): this local "os" shadows the stdlib os package below this point.
+	os := enterpriseApi.ObjectStorage{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ObjectStorage",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "os",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.ObjectStorageSpec{
+			Provider: "s3",
+			S3: enterpriseApi.S3Spec{
+				Endpoint: "https://s3.us-west-2.amazonaws.com",
+				Path:     "bucket/key",
+			},
+		},
+	}
+	c.Create(ctx, &os)
+
+	cr := &enterpriseApi.IndexerCluster{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "IndexerCluster",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.IndexerClusterSpec{
+			QueueRef: corev1.ObjectReference{
+				Name: queue.Name,
+			},
+			ObjectStorageRef: corev1.ObjectReference{
+				Name:      os.Name,
+				Namespace: os.Namespace,
+			},
+		},
+		Status: enterpriseApi.IndexerClusterStatus{
+			ReadyReplicas:           3,
+			CredentialSecretVersion: "123",
+		},
+	}
+	c.Create(ctx, cr)
+
+	// Three ready indexer pods, each mounting the shared secrets volume.
+	pod0 := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "splunk-test-indexer-0",
+			Namespace: "test",
+			Labels: map[string]string{
+				"app.kubernetes.io/instance": "splunk-test-indexer",
+			},
+		},
+		Spec: corev1.PodSpec{
+			Volumes: []corev1.Volume{
+				{
+					Name: "dummy-volume",
+					VolumeSource: corev1.VolumeSource{
+						EmptyDir: &corev1.EmptyDirVolumeSource{},
+					},
+				},
+				{
+					Name: "mnt-splunk-secrets",
+					VolumeSource: corev1.VolumeSource{
+						Secret: &corev1.SecretVolumeSource{
+							SecretName: "test-secrets",
+						},
+					},
+				},
+			},
+		},
+		Status: corev1.PodStatus{
+			Phase: corev1.PodRunning,
+			ContainerStatuses: []corev1.ContainerStatus{
+				{Ready: true},
+			},
+		},
+	}
+
+	pod1 := pod0.DeepCopy()
+	pod1.ObjectMeta.Name = "splunk-test-indexer-1"
+
+	pod2 := pod0.DeepCopy()
+	pod2.ObjectMeta.Name = "splunk-test-indexer-2"
+
+	c.Create(ctx, pod0)
+	c.Create(ctx, pod1)
+	c.Create(ctx, pod2)
+
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-secrets",
+			Namespace: "test",
+		},
+		Data: map[string][]byte{
+			"password": []byte("dummy"),
+		},
+	}
+
+	// Negative test case: secret not found
+	mgr := &indexerClusterPodManager{}
+	err := mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c)
+	assert.NotNil(t, err)
+
+	// Mock secret
+	c.Create(ctx, secret)
+
+	mockHTTPClient := &spltest.MockHTTPClient{}
+
+	// Negative test case: failure in creating remote queue stanza
+	mgr = newTestIndexerQueuePipelineManager(mockHTTPClient)
+
+	err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c)
+	assert.NotNil(t, err)
+
+	// outputs.conf
+	propertyKVList := [][]string{
+		{fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion},
+		{fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path},
+		{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ},
+		{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"},
+		{fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"},
+	}
+	// NOTE(review): propertyKVListOutputs shares propertyKVList's backing array;
+	// the appends below normally reallocate, but any spare capacity would make
+	// later writes visible through both slices — TODO confirm this is intended.
+	propertyKVListOutputs := propertyKVList
+
+	propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"})
+	propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"})
+
+	body := buildFormBody(propertyKVListOutputs)
+	addRemoteQueueHandlersForIndexer(mockHTTPClient, cr, &queue.Spec, "conf-outputs", body)
+
+	// Negative test case: failure in creating remote queue stanza
+	mgr = newTestIndexerQueuePipelineManager(mockHTTPClient)
+
+	err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c)
+	assert.NotNil(t, err)
+
+	// inputs.conf
+	body = buildFormBody(propertyKVList)
+	addRemoteQueueHandlersForIndexer(mockHTTPClient, cr, &queue.Spec, "conf-inputs", body)
+
+	// Negative test case: failure in updating remote queue stanza
+	mgr = newTestIndexerQueuePipelineManager(mockHTTPClient)
+
+	err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c)
+	assert.NotNil(t, err)
+
+	// default-mode.conf
+	propertyKVList = [][]string{
+		{"pipeline:remotequeueruleset", "disabled", "false"},
+		{"pipeline:ruleset", "disabled", "true"},
+		{"pipeline:remotequeuetyping", "disabled", "false"},
+		{"pipeline:remotequeueoutput", "disabled", "false"},
+		{"pipeline:typing", "disabled", "true"},
+	}
+
+	// Register create + update handlers for every pipeline stanza on each pod.
+	for i := 0; i < int(cr.Status.ReadyReplicas); i++ {
+		podName := fmt.Sprintf("splunk-test-indexer-%d", i)
+		baseURL := fmt.Sprintf("https://%s.splunk-test-indexer-headless.test.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName)
+
+		for _, field := range propertyKVList {
+			req, _ := http.NewRequest("POST", baseURL, strings.NewReader(fmt.Sprintf("name=%s", field[0])))
+			mockHTTPClient.AddHandler(req, 200, "", nil)
+
+			updateURL := fmt.Sprintf("%s/%s", baseURL, field[0])
+			req, _ = http.NewRequest("POST", updateURL, strings.NewReader(fmt.Sprintf("%s=%s", field[1], field[2])))
+			mockHTTPClient.AddHandler(req, 200, "", nil)
+		}
+	}
+
+	// Positive test case: all conf endpoints mocked, update succeeds.
+	mgr = newTestIndexerQueuePipelineManager(mockHTTPClient)
+
+	err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c)
+	assert.Nil(t, err)
+}
+
+// buildFormBody joins [key, value] pairs into an urlencoded-style form body
+// "k1=v1&k2=v2...". Values are not percent-escaped; callers supply literals
+// that the mock HTTP matcher compares verbatim.
+// NOTE(review): pairs shorter than 2 elements are skipped, but the separator
+// logic still counts them by index, so a skipped pair can leave a doubled or
+// trailing '&' — TODO confirm inputs are always well-formed pairs.
+func buildFormBody(pairs [][]string) string {
+	var b strings.Builder
+	for i, kv := range pairs {
+		if len(kv) < 2 {
+			continue
+		}
+		fmt.Fprintf(&b, "%s=%s", kv[0], kv[1])
+		if i < len(pairs)-1 {
+			b.WriteByte('&')
+		}
+	}
+	return b.String()
+}
+
+// addRemoteQueueHandlersForIndexer registers, for each ready indexer replica,
+// the pair of mock HTTP handlers that updateIndexerConfFiles hits when managing
+// a remote_queue stanza in the given conf file (confName, e.g. "conf-outputs"):
+// a stanza-create POST to the conf collection and a stanza-update POST to the
+// stanza URL with the supplied form body.
+func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, queue *enterpriseApi.QueueSpec, confName, body string) {
+	for i := 0; i < int(cr.Status.ReadyReplicas); i++ {
+		podName := fmt.Sprintf("splunk-%s-indexer-%d", cr.GetName(), i)
+		baseURL := fmt.Sprintf(
+			"https://%s.splunk-%s-indexer-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/%s",
+			podName, cr.GetName(), cr.GetNamespace(), confName,
+		)
+
+		// Stanza creation: POST name=remote_queue:<queue name> to the collection.
+		createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.SQS.Name))
+		reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody))
+		mockHTTPClient.AddHandler(reqCreate, 200, "", nil)
+
+		// Stanza update: POST the key/value body to the stanza URL.
+		updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.SQS.Name))
+		reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body))
+		mockHTTPClient.AddHandler(reqUpdate, 200, "", nil)
+	}
+}
+
+// newTestIndexerQueuePipelineManager returns an indexerClusterPodManager whose
+// Splunk clients are backed by the supplied mock HTTP client.
+// NOTE(review): this overwrites the package-level newSplunkClientForQueuePipeline
+// hook and never restores it, so the override leaks into subsequent tests —
+// TODO confirm callers save/restore it where isolation matters.
+func newTestIndexerQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager {
+	newSplunkClientForQueuePipeline = func(uri, user, pass string) *splclient.SplunkClient {
+		return &splclient.SplunkClient{
+			ManagementURI: uri,
+			Username:      user,
+			Password:      pass,
+			Client:        mockHTTPClient,
+		}
+	}
+	return &indexerClusterPodManager{
+		newSplunkClient: newSplunkClientForQueuePipeline,
+	}
+}
+
+// TestApplyIndexerClusterManager_Queue_Success runs a full ApplyIndexerCluster
+// reconciliation against a fake client pre-populated with a ready
+// ClusterManager, Queue, ObjectStorage, secret, pods, StatefulSet, and
+// headless service, with every conf-file HTTP call mocked, and expects the
+// reconcile to succeed.
+func TestApplyIndexerClusterManager_Queue_Success(t *testing.T) {
+	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+	ctx := context.TODO()
+
+	scheme := runtime.NewScheme()
+	_ = enterpriseApi.AddToScheme(scheme)
+	_ = corev1.AddToScheme(scheme)
+	_ = appsv1.AddToScheme(scheme)
+	c := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+	// Object definitions
+	queue := &enterpriseApi.Queue{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Queue",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "queue",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.QueueSpec{
+			Provider: "sqs",
+			SQS: enterpriseApi.SQSSpec{
+				Name:       "test-queue",
+				AuthRegion: "us-west-2",
+				Endpoint:   "https://sqs.us-west-2.amazonaws.com",
+				DLQ:        "sqs-dlq-test",
+			},
+		},
+	}
+	c.Create(ctx, queue)
+
+	// NOTE(review): this local "os" shadows the stdlib os package for the rest
+	// of the function; os.Setenv above is unaffected.
+	os := &enterpriseApi.ObjectStorage{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ObjectStorage",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "os",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.ObjectStorageSpec{
+			Provider: "s3",
+			S3: enterpriseApi.S3Spec{
+				Endpoint: "https://s3.us-west-2.amazonaws.com",
+				Path:     "bucket/key",
+			},
+		},
+	}
+	c.Create(ctx, os)
+
+	cm := &enterpriseApi.ClusterManager{
+		TypeMeta: metav1.TypeMeta{Kind: "ClusterManager"},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "cm",
+			Namespace: "test",
+		},
+		Status: enterpriseApi.ClusterManagerStatus{
+			Phase: enterpriseApi.PhaseReady,
+		},
+	}
+	c.Create(ctx, cm)
+
+	cr := &enterpriseApi.IndexerCluster{
+		TypeMeta: metav1.TypeMeta{Kind: "IndexerCluster"},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.IndexerClusterSpec{
+			Replicas: 1,
+			QueueRef: corev1.ObjectReference{
+				Name:      queue.Name,
+				Namespace: queue.Namespace,
+			},
+			ObjectStorageRef: corev1.ObjectReference{
+				Name:      os.Name,
+				Namespace: os.Namespace,
+			},
+			CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+				ClusterManagerRef: corev1.ObjectReference{
+					Name: "cm",
+				},
+				Mock: true,
+			},
+		},
+		Status: enterpriseApi.IndexerClusterStatus{
+			Phase: enterpriseApi.PhaseReady,
+		},
+	}
+	c.Create(ctx, cr)
+
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-secrets",
+			Namespace: "test",
+		},
+		Data: map[string][]byte{
+			"password": []byte("dummy"),
+		},
+	}
+	c.Create(ctx, secret)
+
+	// Ready cluster-manager pod mounting the secrets volume.
+	cmPod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "splunk-cm-cluster-manager-0",
+			Namespace: "test",
+		},
+		Spec: corev1.PodSpec{
+			Volumes: []corev1.Volume{
+				{
+					Name: "mnt-splunk-secrets",
+					VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{
+						SecretName: "test-secrets",
+					}},
+				},
+			},
+		},
+		Status: corev1.PodStatus{
+			Phase: corev1.PodRunning,
+			ContainerStatuses: []corev1.ContainerStatus{
+				{Ready: true},
+			},
+		},
+	}
+	c.Create(ctx, cmPod)
+
+	// Single ready indexer pod for the one-replica cluster.
+	pod0 := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "splunk-test-indexer-0",
+			Namespace: "test",
+			Labels: map[string]string{
+				"app.kubernetes.io/instance": "splunk-test-indexer",
+			},
+		},
+		Spec: corev1.PodSpec{
+			Volumes: []corev1.Volume{
+				{
+					Name: "dummy-volume",
+					VolumeSource: corev1.VolumeSource{
+						EmptyDir: &corev1.EmptyDirVolumeSource{},
+					},
+				},
+				{
+					Name: "mnt-splunk-secrets",
+					VolumeSource: corev1.VolumeSource{
+						Secret: &corev1.SecretVolumeSource{
+							SecretName: "test-secrets",
+						},
+					},
+				},
+			},
+		},
+		Status: corev1.PodStatus{
+			Phase: corev1.PodRunning,
+			ContainerStatuses: []corev1.ContainerStatus{
+				{Ready: true},
+			},
+		},
+	}
+	c.Create(ctx, pod0)
+
+	replicas := int32(1)
+	sts := &appsv1.StatefulSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "splunk-test-indexer",
+			Namespace: "test",
+		},
+		Spec: appsv1.StatefulSetSpec{
+			Replicas: &replicas,
+		},
+		Status: appsv1.StatefulSetStatus{
+			Replicas:        1,
+			ReadyReplicas:   1,
+			UpdatedReplicas: 1,
+		},
+	}
+	c.Create(ctx, sts)
+
+	svc := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "splunk-test-indexer-headless",
+			Namespace: "test",
+		},
+	}
+	c.Create(ctx, svc)
+
+	// outputs.conf
+	mockHTTPClient := &spltest.MockHTTPClient{}
+
+	base := "https://splunk-test-indexer-0.splunk-test-indexer-headless.test.svc.cluster.local:8089/servicesNS/nobody/system/configs"
+	q := "remote_queue:test-queue"
+
+	mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs", base), "name="+q), 200, "", nil)
+	mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs/%s", base, q), ""), 200, "", nil)
+
+	// inputs.conf
+	mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs", base), "name="+q), 200, "", nil)
+	mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs/%s", base, q), ""), 200, "", nil)
+
+	// default-mode.conf
+	// NOTE(review): these handlers are registered with an empty stanza suffix
+	// ("name=" body and trailing "/" URL), identically for every field —
+	// presumably the mock matches on URL/body prefix; TODO confirm.
+	pipelineFields := []string{
+		"pipeline:remotequeueruleset",
+		"pipeline:ruleset",
+		"pipeline:remotequeuetyping",
+		"pipeline:remotequeueoutput",
+		"pipeline:typing",
+	}
+	for range pipelineFields {
+		mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-default-mode", base), "name="), 200, "", nil)
+		mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-default-mode/", base), ""), 200, "", nil)
+	}
+
+	res, err := ApplyIndexerCluster(ctx, c, cr)
+	assert.NotNil(t, res)
+	assert.Nil(t, err)
+}
+
+// mustReq builds an *http.Request with the given method and URL, attaching
+// body as the payload when non-empty (nil body otherwise). It panics on
+// construction failure, which is acceptable in tests with fixed inputs.
+func mustReq(method, url, body string) *http.Request {
+	var r *http.Request
+	var err error
+	if body != "" {
+		r, err = http.NewRequest(method, url, strings.NewReader(body))
+	} else {
+		r, err = http.NewRequest(method, url, nil)
+	}
+	if err != nil {
+		panic(err)
+	}
+	return r
+
+}
+
+// TestPasswordSyncCompleted verifies that ApplyIdxcSecret publishes a
+// Normal "PasswordSyncCompleted" event when the CR's recorded namespace
+// secret resource version is stale (forcing a sync) and the sync runs to
+// completion with zero replicas, so no pod is actually touched.
+func TestPasswordSyncCompleted(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ sch := pkgruntime.NewScheme()
+ utilruntime.Must(clientgoscheme.AddToScheme(sch))
+ utilruntime.Must(corev1.AddToScheme(sch))
+ utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+ builder := fake.NewClientBuilder().
+ WithScheme(sch).
+ WithStatusSubresource(&enterpriseApi.ClusterManager{}).
+ WithStatusSubresource(&enterpriseApi.IndexerCluster{})
+
+ client := builder.Build()
+ ctx := context.TODO()
+
+ // Create a mock event recorder to capture events
+ recorder := &mockEventRecorder{events: []mockEvent{}}
+ eventPublisher := &K8EventPublisher{recorder: recorder}
+
+ cm := enterpriseApi.ClusterManager{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ClusterManager",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cm",
+ Namespace: "test",
+ },
+ }
+ cm.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("ClusterManager"))
+
+ err := client.Create(ctx, &cm)
+ if err != nil {
+ t.Fatalf("Failed to create ClusterManager: %v", err)
+ }
+
+ idxc := enterpriseApi.IndexerCluster{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "IndexerCluster",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "idxc",
+ Namespace: cm.GetNamespace(),
+ },
+ Spec: enterpriseApi.IndexerClusterSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ ClusterManagerRef: corev1.ObjectReference{
+ Name: cm.GetName(),
+ },
+ },
+ },
+ }
+ idxc.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("IndexerCluster"))
+
+ err = client.Create(ctx, &idxc)
+ if err != nil {
+ t.Fatalf("Failed to create IndexerCluster: %v", err)
+ }
+
+ // Create namespace scoped secret so ApplyIdxcSecret has something to work with
+ nsSecret, err := splutil.ApplyNamespaceScopedSecretObject(ctx, client, cm.GetNamespace())
+ if err != nil {
+ t.Fatalf("Failed to apply namespace scoped secret: %v", err)
+ }
+
+ // Set CR status resource version to a stale value so ApplyIdxcSecret does not early-return
+ idxc.Status.NamespaceSecretResourceVersion = nsSecret.ResourceVersion + "-old"
+
+ // Initialize a minimal pod manager for ApplyIdxcSecret
+ mgr := &indexerClusterPodManager{
+ c: client,
+ log: logging.FromContext(ctx).With("func", "TestPasswordSyncCompleted", "name", idxc.GetName(), "namespace", idxc.GetNamespace()),
+ cr: &idxc,
+ }
+
+ // Use a mock PodExec client; replicas will be 0 so it won't be exercised
+ var mockPodExecClient *spltest.MockPodExecClient = &spltest.MockPodExecClient{}
+
+ // Add event publisher to context so ApplyIdxcSecret can emit events
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ // Call ApplyIdxcSecret; with 0 replicas it will complete without touching pods,
+ // but still emit the PasswordSyncCompleted event
+ err = ApplyIdxcSecret(ctx, mgr, 0, mockPodExecClient)
+ if err != nil {
+ t.Errorf("Couldn't apply idxc secret %s", err.Error())
+ }
+
+ // Check that PasswordSyncCompleted event was published
+ foundEvent := false
+ for _, event := range recorder.events {
+ if event.reason == "PasswordSyncCompleted" {
+ foundEvent = true
+ if event.eventType != corev1.EventTypeNormal {
+ t.Errorf("Expected Normal event type, got %s", event.eventType)
+ }
+ if !strings.Contains(event.message, "Password synchronized") {
+ t.Errorf("Expected event message to contain 'Password synchronized', got: %s", event.message)
+ }
+ break
+ }
+ }
+ if !foundEvent {
+ t.Errorf("Expected PasswordSyncCompleted event to be published")
+ }
+}
+
+// TestClusterQuorumRestoredClusterInitialized verifies that
+// indexerClusterPodManager.updateStatus publishes both Normal
+// "ClusterInitialized" and "ClusterQuorumRestored" events when the mocked
+// cluster-manager info endpoint reports a healthy cluster and the CR's
+// status starts from IndexingReady == false (so a transition occurs).
+func TestClusterQuorumRestoredClusterInitialized(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ sch := pkgruntime.NewScheme()
+ utilruntime.Must(clientgoscheme.AddToScheme(sch))
+ utilruntime.Must(corev1.AddToScheme(sch))
+ utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+ builder := fake.NewClientBuilder().
+ WithScheme(sch).
+ WithStatusSubresource(&enterpriseApi.ClusterManager{}).
+ WithStatusSubresource(&enterpriseApi.IndexerCluster{})
+
+ client := builder.Build()
+ ctx := context.TODO()
+
+ // Create a mock event recorder to capture events
+ recorder := &mockEventRecorder{events: []mockEvent{}}
+ eventPublisher := &K8EventPublisher{recorder: recorder}
+
+ cm := enterpriseApi.ClusterManager{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ClusterManager",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "manager1",
+ Namespace: "test",
+ },
+ }
+ cm.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("ClusterManager"))
+
+ err := client.Create(ctx, &cm)
+ if err != nil {
+ t.Fatalf("Failed to create ClusterManager: %v", err)
+ }
+
+ idxc := enterpriseApi.IndexerCluster{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "IndexerCluster",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "idxc",
+ Namespace: cm.GetNamespace(),
+ },
+ Spec: enterpriseApi.IndexerClusterSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ ClusterManagerRef: corev1.ObjectReference{
+ Name: cm.GetName(),
+ },
+ },
+ },
+ }
+ idxc.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("IndexerCluster"))
+
+ err = client.Create(ctx, &idxc)
+ if err != nil {
+ t.Fatalf("Failed to create IndexerCluster: %v", err)
+ }
+
+ // Build mock HTTP handlers for a healthy cluster manager info/peers response
+ mockHandlers := []spltest.MockHTTPHandler{
+ {
+ Method: "GET",
+ URL: "https://splunk-manager1-cluster-manager-service.test.svc.cluster.local:8089/services/cluster/manager/info?count=0&output_mode=json",
+ Status: 200,
+ Err: nil,
+ Body: splcommon.TestIndexerClusterPodManagerInfo,
+ },
+ {
+ Method: "GET",
+ URL: "https://splunk-manager1-cluster-manager-service.test.svc.cluster.local:8089/services/cluster/manager/peers?count=0&output_mode=json",
+ Status: 200,
+ Err: nil,
+ Body: splcommon.TestIndexerClusterPodManagerPeer,
+ },
+ }
+
+ // Create mock Splunk client and indexerClusterPodManager using existing helper
+ mockSplunkClient := &spltest.MockHTTPClient{}
+ mockSplunkClient.AddHandlers(mockHandlers...)
+
+ mgr := getIndexerClusterPodManager("TestClusterQuorumRestoredClusterInitialized", mockHandlers, mockSplunkClient, 3)
+ replicas := int32(3)
+ ss := &appsv1.StatefulSet{
+ Status: appsv1.StatefulSetStatus{
+ Replicas: replicas,
+ ReadyReplicas: replicas,
+ },
+ }
+
+ // Wire a mock k8s client and event publisher into context
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ // Use a mock k8s client as in other updateStatus tests
+ c := spltest.NewMockClient()
+ mgr.c = c
+
+ // Ensure initial status is not indexing ready so we see a transition
+ mgr.cr.Status.IndexingReady = false
+
+ // Call updateStatus, which should transition to indexing ready and emit the event
+ err = mgr.updateStatus(ctx, ss)
+ if err != nil {
+ t.Fatalf("updateStatus returned unexpected error: %v", err)
+ }
+
+ // Check that both ClusterInitialized and ClusterQuorumRestored events were published
+ // (the inner breaks only fire once BOTH flags are set, so the scan
+ // stops as soon as the second of the two events has been seen)
+ clusterInitialized := false
+ quorumRestored := false
+ for _, event := range recorder.events {
+ if event.reason == "ClusterInitialized" {
+ clusterInitialized = true
+ if event.eventType != corev1.EventTypeNormal {
+ t.Errorf("Expected Normal event type for ClusterInitialized, got %s", event.eventType)
+ }
+ if quorumRestored {
+ break
+ }
+ }
+ if event.reason == "ClusterQuorumRestored" {
+ quorumRestored = true
+ if event.eventType != corev1.EventTypeNormal {
+ t.Errorf("Expected Normal event type for ClusterQuorumRestored, got %s", event.eventType)
+ }
+ if clusterInitialized {
+ break
+ }
+ }
+ }
+ if !clusterInitialized {
+ t.Errorf("Expected ClusterInitialized event to be published")
+ }
+ if !quorumRestored {
+ t.Errorf("Expected ClusterQuorumRestored event to be published")
+ }
+}
+
+// TestClusterQuorumLostEvent verifies that updateStatus publishes a
+// Warning "ClusterQuorumLost" event when the cluster transitions from
+// indexing-ready to not-ready. It drives updateStatus twice: first with a
+// healthy mocked manager/info response to set IndexingReady, then with a
+// response whose indexing_ready_flag is false to trigger the event.
+func TestClusterQuorumLostEvent(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ sch := pkgruntime.NewScheme()
+ utilruntime.Must(clientgoscheme.AddToScheme(sch))
+ utilruntime.Must(corev1.AddToScheme(sch))
+ utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+ builder := fake.NewClientBuilder().
+ WithScheme(sch).
+ WithStatusSubresource(&enterpriseApi.ClusterManager{}).
+ WithStatusSubresource(&enterpriseApi.IndexerCluster{})
+
+ client := builder.Build()
+ ctx := context.TODO()
+
+ recorder := &mockEventRecorder{events: []mockEvent{}}
+ eventPublisher := &K8EventPublisher{recorder: recorder}
+
+ cm := enterpriseApi.ClusterManager{
+ TypeMeta: metav1.TypeMeta{Kind: "ClusterManager"},
+ ObjectMeta: metav1.ObjectMeta{Name: "manager1", Namespace: "test"},
+ }
+ cm.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("ClusterManager"))
+ if err := client.Create(ctx, &cm); err != nil {
+ t.Fatalf("Failed to create ClusterManager: %v", err)
+ }
+
+ idxc := enterpriseApi.IndexerCluster{
+ TypeMeta: metav1.TypeMeta{Kind: "IndexerCluster"},
+ ObjectMeta: metav1.ObjectMeta{Name: "idxc", Namespace: cm.GetNamespace()},
+ Spec: enterpriseApi.IndexerClusterSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ ClusterManagerRef: corev1.ObjectReference{Name: cm.GetName()},
+ },
+ },
+ }
+ idxc.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("IndexerCluster"))
+ if err := client.Create(ctx, &idxc); err != nil {
+ t.Fatalf("Failed to create IndexerCluster: %v", err)
+ }
+
+ // First call: set initial state to indexing ready using healthy cluster response
+ mockHandlers := []spltest.MockHTTPHandler{
+ {
+ Method: "GET",
+ URL: "https://splunk-manager1-cluster-manager-service.test.svc.cluster.local:8089/services/cluster/manager/info?count=0&output_mode=json",
+ Status: 200,
+ Body: splcommon.TestIndexerClusterPodManagerInfo,
+ },
+ {
+ Method: "GET",
+ URL: "https://splunk-manager1-cluster-manager-service.test.svc.cluster.local:8089/services/cluster/manager/peers?count=0&output_mode=json",
+ Status: 200,
+ Body: splcommon.TestIndexerClusterPodManagerPeer,
+ },
+ }
+ mockSplunkClient := &spltest.MockHTTPClient{}
+ mockSplunkClient.AddHandlers(mockHandlers...)
+
+ mgr := getIndexerClusterPodManager("TestClusterQuorumLostEvent", mockHandlers, mockSplunkClient, 3)
+ replicas := int32(3)
+ ss := &appsv1.StatefulSet{
+ Status: appsv1.StatefulSetStatus{Replicas: replicas, ReadyReplicas: replicas},
+ }
+
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+ c := spltest.NewMockClient()
+ mgr.c = c
+
+ mgr.cr.Status.IndexingReady = false
+ mgr.cr.Status.Initialized = false
+ err := mgr.updateStatus(ctx, ss)
+ if err != nil {
+ t.Fatalf("First updateStatus returned unexpected error: %v", err)
+ }
+ if !mgr.cr.Status.IndexingReady {
+ t.Fatal("Expected IndexingReady to be true after first updateStatus")
+ }
+
+ // Reset recorder and prepare second call with indexing_ready=false
+ // (the JSON below mirrors the healthy fixture except indexing_ready_flag)
+ recorder.events = []mockEvent{}
+ quorumLostInfo := `{"entry":[{"content":{"initialized_flag":true,"indexing_ready_flag":false,"service_ready_flag":true,"maintenance_mode":false,"rolling_restart_flag":false,"label":"splunk-manager1-cluster-manager-0","active_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/506c58d5aeda1dd6017889e3186e7571-1583870198.bundle","checksum":"ABC123","timestamp":1583870198},"latest_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/506c58d5aeda1dd6017889e3186e7571-1583870198.bundle","checksum":"ABC123","timestamp":1583870198},"multisite":"false","replication_factor":3,"site_replication_factor":"origin:2,total:3"}}]}`
+ quorumLostHandlers := []spltest.MockHTTPHandler{
+ {Method: "GET", URL: "https://splunk-manager1-cluster-manager-service.test.svc.cluster.local:8089/services/cluster/manager/info?count=0&output_mode=json", Status: 200, Body: quorumLostInfo},
+ {Method: "GET", URL: "https://splunk-manager1-cluster-manager-service.test.svc.cluster.local:8089/services/cluster/manager/peers?count=0&output_mode=json", Status: 200, Body: splcommon.TestIndexerClusterPodManagerPeer},
+ }
+ mockSplunkClient2 := &spltest.MockHTTPClient{}
+ mockSplunkClient2.AddHandlers(quorumLostHandlers...)
+ mgr.newSplunkClient = func(managementURI, username, password string) *splclient.SplunkClient {
+ sc := splclient.NewSplunkClient(managementURI, username, password)
+ sc.Client = mockSplunkClient2
+ return sc
+ }
+
+ err = mgr.updateStatus(ctx, ss)
+ if err != nil {
+ t.Fatalf("Second updateStatus returned unexpected error: %v", err)
+ }
+
+ found := false
+ for _, event := range recorder.events {
+ if event.reason == "ClusterQuorumLost" {
+ found = true
+ if event.eventType != corev1.EventTypeWarning {
+ t.Errorf("Expected Warning event type for ClusterQuorumLost, got %s", event.eventType)
+ }
+ if !strings.Contains(event.message, "quorum") {
+ t.Errorf("Expected event message to mention quorum, got: %s", event.message)
+ }
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected ClusterQuorumLost event to be published")
+ }
+}
+
+// TestScalingBlockedRFEvent verifies that verifyRFPeers publishes a
+// Warning "ScalingBlockedRF" event and bumps Spec.Replicas when the
+// requested replica count (1) is below the replication factor reported
+// by the mocked cluster/config endpoint (RF=3 per the fixture).
+func TestScalingBlockedRFEvent(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ ctx := context.TODO()
+ recorder := &mockEventRecorder{events: []mockEvent{}}
+ eventPublisher := &K8EventPublisher{recorder: recorder}
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ // Use the same fixture and URL as TestVerifyRFPeers
+ mockHandlers := []spltest.MockHTTPHandler{
+ {
+ Method: "GET",
+ URL: "https://splunk-manager1-cluster-manager-service.test.svc.cluster.local:8089/services/cluster/config?count=0&output_mode=json",
+ Status: 200,
+ Body: loadFixture(t, "service_stack1_indexer_service.json"),
+ },
+ }
+ mockSplunkClient := &spltest.MockHTTPClient{}
+ mockSplunkClient.AddHandlers(mockHandlers...)
+
+ // replicas=1 which is less than RF=3 in the fixture
+ mgr := getIndexerClusterPodManager("TestScalingBlockedRFEvent", mockHandlers, mockSplunkClient, 1)
+
+ // Use spltest.NewMockClient which handles the Get call for the CM pod
+ c := spltest.NewMockClient()
+ err := mgr.verifyRFPeers(ctx, c)
+ if err != nil {
+ t.Fatalf("verifyRFPeers returned unexpected error: %v", err)
+ }
+
+ found := false
+ for _, event := range recorder.events {
+ if event.reason == "ScalingBlockedRF" {
+ found = true
+ if event.eventType != corev1.EventTypeWarning {
+ t.Errorf("Expected Warning event type for ScalingBlockedRF, got %s", event.eventType)
+ }
+ if !strings.Contains(event.message, "replication factor") {
+ t.Errorf("Expected event message to mention replication factor, got: %s", event.message)
+ }
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected ScalingBlockedRF event to be published")
+ }
+ // verifyRFPeers is expected to raise replicas up to the RF, so the
+ // original value of 1 must no longer be present
+ if mgr.cr.Spec.Replicas == 1 {
+ t.Errorf("Expected replicas to be adjusted from 1 to replication factor")
+ }
+}
+
+// TestIdxcScaledUpScaledDownEvent verifies the ScaledUp/ScaledDown event
+// emission logic by replicating the conditional used in
+// indexerClusterPodManager.Update() inline: events fire only when the
+// phase is Ready AND Status.Replicas has converged to the desired count.
+// Two negative cases (wrong phase, unconverged replicas) assert silence.
+func TestIdxcScaledUpScaledDownEvent(t *testing.T) {
+ ctx := context.TODO()
+ recorder := &mockEventRecorder{events: []mockEvent{}}
+ eventPublisher := &K8EventPublisher{recorder: recorder}
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ crName := "test-idxc"
+ cr := &enterpriseApi.IndexerCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: crName, Namespace: "test"},
+ }
+
+ // Simulate ScaledUp: previousReplicas=1, desiredReplicas=3, phase=PhaseReady, Status.Replicas=3
+ previousReplicas := int32(1)
+ desiredReplicas := int32(3)
+ cr.Status.Replicas = desiredReplicas
+ phase := enterpriseApi.PhaseReady
+
+ // Replicate the production conditional from indexerClusterPodManager.Update()
+ ep := GetEventPublisher(ctx, cr)
+ if phase == enterpriseApi.PhaseReady {
+ if desiredReplicas > previousReplicas && cr.Status.Replicas == desiredReplicas {
+ ep.Normal(ctx, "ScaledUp",
+ fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+ }
+ }
+
+ found := false
+ for _, event := range recorder.events {
+ if event.reason == "ScaledUp" {
+ found = true
+ if event.eventType != corev1.EventTypeNormal {
+ t.Errorf("Expected Normal event type for ScaledUp, got %s", event.eventType)
+ }
+ if !strings.Contains(event.message, crName) {
+ t.Errorf("Expected event message to contain CR name '%s', got: %s", crName, event.message)
+ }
+ if !strings.Contains(event.message, "1") || !strings.Contains(event.message, "3") {
+ t.Errorf("Expected event message to contain replica counts, got: %s", event.message)
+ }
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected ScaledUp event to be published")
+ }
+
+ // Simulate ScaledDown: previousReplicas=3, desiredReplicas=1, phase=PhaseReady, Status.Replicas=1
+ recorder.events = []mockEvent{}
+ previousReplicas = int32(3)
+ desiredReplicas = int32(1)
+ cr.Status.Replicas = desiredReplicas
+
+ if phase == enterpriseApi.PhaseReady {
+ if desiredReplicas < previousReplicas && cr.Status.Replicas == desiredReplicas {
+ ep.Normal(ctx, "ScaledDown",
+ fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+ }
+ }
+
+ found = false
+ for _, event := range recorder.events {
+ if event.reason == "ScaledDown" {
+ found = true
+ if event.eventType != corev1.EventTypeNormal {
+ t.Errorf("Expected Normal event type for ScaledDown, got %s", event.eventType)
+ }
+ if !strings.Contains(event.message, crName) {
+ t.Errorf("Expected event message to contain CR name '%s', got: %s", crName, event.message)
+ }
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected ScaledDown event to be published")
+ }
+
+ // Negative: no event when phase is not PhaseReady
+ recorder.events = []mockEvent{}
+ phase = enterpriseApi.PhasePending
+ if phase == enterpriseApi.PhaseReady {
+ if desiredReplicas < previousReplicas && cr.Status.Replicas == desiredReplicas {
+ ep.Normal(ctx, "ScaledDown",
+ fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+ }
+ }
+ if len(recorder.events) != 0 {
+ t.Errorf("Expected no events when phase is not PhaseReady, got %d events", len(recorder.events))
+ }
+
+ // Negative: no event when replicas haven't converged
+ recorder.events = []mockEvent{}
+ phase = enterpriseApi.PhaseReady
+ cr.Status.Replicas = int32(2) // not yet at desiredReplicas
+ if phase == enterpriseApi.PhaseReady {
+ if desiredReplicas < previousReplicas && cr.Status.Replicas == desiredReplicas {
+ ep.Normal(ctx, "ScaledDown",
+ fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+ }
+ }
+ if len(recorder.events) != 0 {
+ t.Errorf("Expected no events when replicas haven't converged, got %d events", len(recorder.events))
+ }
+}
+
+// TestIdxcPasswordSyncFailedEvent verifies that ApplyIdxcSecret publishes
+// a Warning "PasswordSyncFailed" event naming the affected pod when the
+// SetIdxcSecret REST call fails. The pod's mounted secret carries a
+// different idxc_secret than the namespace secret (forcing a sync), and
+// the mock HTTP client returns an error on the config POST.
+func TestIdxcPasswordSyncFailedEvent(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ sch := pkgruntime.NewScheme()
+ utilruntime.Must(clientgoscheme.AddToScheme(sch))
+ utilruntime.Must(corev1.AddToScheme(sch))
+ utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+ builder := fake.NewClientBuilder().
+ WithScheme(sch).
+ WithStatusSubresource(&enterpriseApi.ClusterManager{}).
+ WithStatusSubresource(&enterpriseApi.IndexerCluster{})
+
+ c := builder.Build()
+ ctx := context.TODO()
+
+ recorder := &mockEventRecorder{events: []mockEvent{}}
+ eventPublisher := &K8EventPublisher{recorder: recorder}
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ // Create namespace scoped secret
+ nsSecret, err := splutil.ApplyNamespaceScopedSecretObject(ctx, c, "test")
+ if err != nil {
+ t.Fatalf("Failed to apply namespace scoped secret: %v", err)
+ }
+
+ idxc := enterpriseApi.IndexerCluster{
+ TypeMeta: metav1.TypeMeta{Kind: "IndexerCluster"},
+ ObjectMeta: metav1.ObjectMeta{Name: "idxc", Namespace: "test"},
+ Spec: enterpriseApi.IndexerClusterSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ ClusterManagerRef: corev1.ObjectReference{Name: "cm"},
+ },
+ },
+ }
+ idxc.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("IndexerCluster"))
+ // Set stale resource version so ApplyIdxcSecret doesn't early-return
+ idxc.Status.NamespaceSecretResourceVersion = nsSecret.ResourceVersion + "-old"
+ // Pre-set MaintenanceMode to skip the maintenance mode setup path
+ idxc.Status.MaintenanceMode = true
+ idxc.Status.IdxcPasswordChangedSecrets = make(map[string]bool)
+
+ // Create the indexer pod with a secret volume mount
+ podSecretName := "splunk-idxc-indexer-secret-v1"
+ indexerPodName := "splunk-idxc-indexer-0"
+ pod := &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: indexerPodName, Namespace: "test"},
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{{Name: "splunk", Image: "splunk/splunk:latest"}},
+ Volumes: []corev1.Volume{
+ {
+ Name: "mnt-splunk-secrets",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{SecretName: podSecretName},
+ },
+ },
+ },
+ },
+ }
+ if err := c.Create(ctx, pod); err != nil {
+ t.Fatalf("Failed to create pod: %v", err)
+ }
+
+ // Create the pod's secret with a DIFFERENT idxc_secret than namespace secret
+ podSecret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: podSecretName, Namespace: "test"},
+ Data: map[string][]byte{
+ "password": []byte("admin-password"),
+ "idxc_secret": []byte("old-idxc-secret"),
+ },
+ }
+ if err := c.Create(ctx, podSecret); err != nil {
+ t.Fatalf("Failed to create pod secret: %v", err)
+ }
+
+ // Create a mock HTTP client that returns an error on SetIdxcSecret POST
+ mockSplunkClient := &spltest.MockHTTPClient{}
+ mockSplunkClient.AddHandlers(spltest.MockHTTPHandler{
+ Method: "POST",
+ URL: fmt.Sprintf("https://splunk-idxc-indexer-0.splunk-idxc-indexer-headless.test.svc.cluster.local:8089/services/cluster/config/config?secret=%s", string(nsSecret.Data["idxc_secret"])),
+ Status: 500,
+ Err: fmt.Errorf("mock SetIdxcSecret failure"),
+ })
+
+ mgr := &indexerClusterPodManager{
+ c: c,
+ log: logging.FromContext(ctx).With("func", "TestIdxcPasswordSyncFailedEvent", "name", idxc.GetName(), "namespace", idxc.GetNamespace()),
+ cr: &idxc,
+ newSplunkClient: func(managementURI, username, password string) *splclient.SplunkClient {
+ sc := splclient.NewSplunkClient(managementURI, username, password)
+ sc.Client = mockSplunkClient
+ return sc
+ },
+ }
+
+ mockPodExecClient := &spltest.MockPodExecClient{}
+
+ // Call ApplyIdxcSecret — should fail at SetIdxcSecret and emit PasswordSyncFailed
+ err = ApplyIdxcSecret(ctx, mgr, 1, mockPodExecClient)
+ if err == nil {
+ t.Errorf("Expected error from ApplyIdxcSecret when SetIdxcSecret fails")
+ }
+
+ found := false
+ for _, event := range recorder.events {
+ if event.reason == "PasswordSyncFailed" {
+ found = true
+ if event.eventType != corev1.EventTypeWarning {
+ t.Errorf("Expected Warning event type for PasswordSyncFailed, got %s", event.eventType)
+ }
+ if !strings.Contains(event.message, indexerPodName) {
+ t.Errorf("Expected event message to contain pod name '%s', got: %s", indexerPodName, event.message)
+ }
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected PasswordSyncFailed event to be published")
+ }
+}
+
+// mockEvent stores event details for testing
+type mockEvent struct {
+ eventType string
+ reason string
+ message string
+}
+
+// mockEventRecorder implements record.EventRecorder for testing.
+// It appends every recorded event to the events slice without locking,
+// so it is intended for single-goroutine test use only.
+type mockEventRecorder struct {
+ events []mockEvent
+}
+
+// Event records a plain event; the object argument is ignored.
+func (m *mockEventRecorder) Event(object pkgruntime.Object, eventType, reason, message string) {
+ m.events = append(m.events, mockEvent{eventType: eventType, reason: reason, message: message})
+}
+
+// Eventf records a formatted event; the object argument is ignored.
+func (m *mockEventRecorder) Eventf(object pkgruntime.Object, eventType, reason, messageFmt string, args ...interface{}) {
+ m.events = append(m.events, mockEvent{eventType: eventType, reason: reason, message: fmt.Sprintf(messageFmt, args...)})
+}
+
+// AnnotatedEventf records a formatted event; annotations and object are ignored.
+func (m *mockEventRecorder) AnnotatedEventf(object pkgruntime.Object, annotations map[string]string, eventType, reason, messageFmt string, args ...interface{}) {
+ m.events = append(m.events, mockEvent{eventType: eventType, reason: reason, message: fmt.Sprintf(messageFmt, args...)})
+}
+
+// TestIdxcQueueConfigUpdatedEvent verifies the shape and content of the
+// Normal "QueueConfigUpdated" event by emitting it inline exactly as the
+// production code does, then asserting the recorder captured it with the
+// expected type and message text.
+func TestIdxcQueueConfigUpdatedEvent(t *testing.T) {
+ ctx := context.TODO()
+ recorder := &mockEventRecorder{events: []mockEvent{}}
+ eventPublisher := &K8EventPublisher{recorder: recorder}
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ crName := "test-idxc"
+ cr := &enterpriseApi.IndexerCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: crName, Namespace: "test"},
+ }
+ cr.Spec.Replicas = 3
+
+ // Replicate the production event emission from ApplyIndexerClusterManager()
+ ep := GetEventPublisher(ctx, cr)
+ ep.Normal(ctx, "QueueConfigUpdated",
+ fmt.Sprintf("Queue/Pipeline configuration updated for %d indexers", cr.Spec.Replicas))
+
+ found := false
+ for _, event := range recorder.events {
+ if event.reason == "QueueConfigUpdated" {
+ found = true
+ if event.eventType != corev1.EventTypeNormal {
+ t.Errorf("Expected Normal event type for QueueConfigUpdated, got %s", event.eventType)
+ }
+ if !strings.Contains(event.message, "3") {
+ t.Errorf("Expected event message to contain replica count '3', got: %s", event.message)
+ }
+ if !strings.Contains(event.message, "Queue/Pipeline") {
+ t.Errorf("Expected event message to contain 'Queue/Pipeline', got: %s", event.message)
+ }
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected QueueConfigUpdated event to be published")
+ }
+}
+
+// TestIdxcIndexersRestartedEvent verifies the shape and content of the
+// Normal "IndexersRestarted" event by emitting it inline exactly as the
+// production code does, then asserting the recorder captured it with the
+// expected type and message text.
+func TestIdxcIndexersRestartedEvent(t *testing.T) {
+ ctx := context.TODO()
+ recorder := &mockEventRecorder{events: []mockEvent{}}
+ eventPublisher := &K8EventPublisher{recorder: recorder}
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ crName := "test-idxc"
+ cr := &enterpriseApi.IndexerCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: crName, Namespace: "test"},
+ }
+ cr.Spec.Replicas = 5
+
+ // Replicate the production event emission from ApplyIndexerClusterManager()
+ ep := GetEventPublisher(ctx, cr)
+ ep.Normal(ctx, "IndexersRestarted",
+ fmt.Sprintf("Restarted Splunk on %d indexer pods", cr.Spec.Replicas))
+
+ found := false
+ for _, event := range recorder.events {
+ if event.reason == "IndexersRestarted" {
+ found = true
+ if event.eventType != corev1.EventTypeNormal {
+ t.Errorf("Expected Normal event type for IndexersRestarted, got %s", event.eventType)
+ }
+ if !strings.Contains(event.message, "5") {
+ t.Errorf("Expected event message to contain replica count '5', got: %s", event.message)
+ }
+ if !strings.Contains(event.message, "Restarted Splunk") {
+ t.Errorf("Expected event message to contain 'Restarted Splunk', got: %s", event.message)
+ }
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected IndexersRestarted event to be published")
+ }
+}
diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go
new file mode 100644
index 000000000..47e4c4653
--- /dev/null
+++ b/pkg/splunk/enterprise/ingestorcluster.go
@@ -0,0 +1,503 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package enterprise
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"log/slog"
+	"reflect"
+	"strings"
+	"time"
+
+	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+	"github.com/splunk/splunk-operator/pkg/logging"
+	splclient "github.com/splunk/splunk-operator/pkg/splunk/client"
+	splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
+	splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller"
+	splutil "github.com/splunk/splunk-operator/pkg/splunk/util"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+// ApplyIngestorCluster reconciles the state of an IngestorCluster custom resource.
+// It validates the spec, creates or updates the supporting Kubernetes objects
+// (general config, headless/regular services, StatefulSet), pushes Queue/Pipeline
+// conf-file updates to the pods when the credential secret version or service
+// account changes, and keeps the CR status in sync. A requeue result is returned
+// until the cluster reaches the Ready phase.
+func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpriseApi.IngestorCluster) (reconcile.Result, error) {
+	var err error
+
+	// Unless modified, reconcile for this object will be requeued after 5 seconds
+	result := reconcile.Result{
+		Requeue:      true,
+		RequeueAfter: time.Second * 5,
+	}
+
+	logger := logging.FromContext(ctx).With("func", "ApplyIngestorCluster", "name", cr.GetName(), "namespace", cr.GetNamespace())
+
+	if cr.Status.ResourceRevMap == nil {
+		cr.Status.ResourceRevMap = make(map[string]string)
+	}
+
+	// Store the event publisher in the context so nested helpers can emit events.
+	eventPublisher := GetEventPublisher(ctx, cr)
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+	// Kind may be empty on objects fetched through some clients; set it explicitly.
+	cr.Kind = "IngestorCluster"
+
+	// Validate and updates defaults for CR
+	err = validateIngestorClusterSpec(ctx, client, cr)
+	if err != nil {
+		eventPublisher.Warning(ctx, "SpecValidationFailure", fmt.Sprintf("validation of ingestor cluster spec failed due to %s", err.Error()))
+		logger.ErrorContext(ctx, "Failed to validate ingestor cluster spec", "error", err.Error())
+		return result, err
+	}
+
+	// Initialize phase
+	cr.Status.Phase = enterpriseApi.PhaseError
+
+	// Track previous ready replicas for scaling events
+	previousReadyReplicas := cr.Status.ReadyReplicas
+
+	// Update the CR Status on every return path (err is captured by pointer).
+	defer updateCRStatus(ctx, client, cr, &err)
+	if cr.Status.Replicas < cr.Spec.Replicas {
+		logger.InfoContext(ctx, "Scaling up ingestor cluster", "previousReplicas", cr.Status.Replicas, "newReplicas", cr.Spec.Replicas)
+		// Reset so that secretChanged/serviceAccountChanged below evaluate true and
+		// the Queue/Pipeline conf files get pushed to the newly created pods.
+		cr.Status.CredentialSecretVersion = "0"
+		cr.Status.ServiceAccount = ""
+	}
+	cr.Status.Replicas = cr.Spec.Replicas
+
+	// If needed, migrate the app framework status
+	err = checkAndMigrateAppDeployStatus(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig, true)
+	if err != nil {
+		return result, err
+	}
+
+	// If app framework is configured, then do following things
+	// Initialize the S3 clients based on providers
+	// Check the status of apps on remote storage
+	if len(cr.Spec.AppFrameworkConfig.AppSources) != 0 {
+		err = initAndCheckAppInfoStatus(ctx, client, cr, &cr.Spec.AppFrameworkConfig, &cr.Status.AppContext)
+		if err != nil {
+			eventPublisher.Warning(ctx, "AppInfoStatusInitializationFailure", fmt.Sprintf("init and check app info status failed due to %s", err.Error()))
+			cr.Status.AppContext.IsDeploymentInProgress = false
+			return result, err
+		}
+	}
+
+	cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-ingestor", cr.GetName())
+
+	// Create or update general config resources
+	namespaceScopedSecret, err := ApplySplunkConfig(ctx, client, cr, cr.Spec.CommonSplunkSpec, SplunkIngestor)
+	if err != nil {
+		eventPublisher.Warning(ctx, "ApplySplunkConfigFailure", fmt.Sprintf("apply of general config failed due to %s", err.Error()))
+		logger.ErrorContext(ctx, "create or update general config failed", "error", err.Error())
+		return result, err
+	}
+
+	// Check if deletion has been requested
+	if cr.ObjectMeta.DeletionTimestamp != nil {
+		if cr.Spec.MonitoringConsoleRef.Name != "" {
+			_, err = ApplyMonitoringConsoleEnvConfigMap(ctx, client, cr.GetNamespace(), cr.GetName(), cr.Spec.MonitoringConsoleRef.Name, make([]corev1.EnvVar, 0), false)
+			if err != nil {
+				eventPublisher.Warning(ctx, "ApplyMonitoringConsoleEnvConfigMapFailure", fmt.Sprintf("apply of monitoring console config map failed due to %s", err.Error()))
+				return result, err
+			}
+		}
+
+		// If this is the last of its kind getting deleted,
+		// remove the entry for this CR type from configMap or else
+		// just decrement the refCount for this CR type
+		if len(cr.Spec.AppFrameworkConfig.AppSources) != 0 {
+			err = UpdateOrRemoveEntryFromConfigMapLocked(ctx, client, cr, SplunkIngestor)
+			if err != nil {
+				return result, err
+			}
+		}
+
+		DeleteOwnerReferencesForResources(ctx, client, cr, SplunkIngestor)
+
+		terminating, err := splctrl.CheckForDeletion(ctx, cr, client)
+		if terminating && err != nil {
+			cr.Status.Phase = enterpriseApi.PhaseTerminating
+		} else {
+			result.Requeue = false
+		}
+		return result, err
+	}
+
+	// Create or update a headless service for ingestor cluster
+	err = splctrl.ApplyService(ctx, client, getSplunkService(ctx, cr, &cr.Spec.CommonSplunkSpec, SplunkIngestor, true))
+	if err != nil {
+		eventPublisher.Warning(ctx, "ApplyServiceFailure", fmt.Sprintf("apply of headless service failed due to %s", err.Error()))
+		return result, err
+	}
+
+	// Create or update a regular service for ingestor cluster
+	err = splctrl.ApplyService(ctx, client, getSplunkService(ctx, cr, &cr.Spec.CommonSplunkSpec, SplunkIngestor, false))
+	if err != nil {
+		eventPublisher.Warning(ctx, "ApplyServiceFailure", fmt.Sprintf("apply of service failed due to %s", err.Error()))
+		return result, err
+	}
+
+	// If we are using App Framework and are scaling up, we should re-populate the
+	// config map with all the appSource entries
+	// This is done so that the new pods
+	// that come up now will have the complete list of all the apps and then can
+	// download and install all the apps
+	// If we are scaling down, just update the auxPhaseInfo list
+	if len(cr.Spec.AppFrameworkConfig.AppSources) != 0 && cr.Status.ReadyReplicas > 0 {
+		statefulsetName := GetSplunkStatefulsetName(SplunkIngestor, cr.GetName())
+
+		isStatefulSetScaling, err := splctrl.IsStatefulSetScalingUpOrDown(ctx, client, cr, statefulsetName, cr.Spec.Replicas)
+		if err != nil {
+			return result, err
+		}
+
+		appStatusContext := cr.Status.AppContext
+
+		switch isStatefulSetScaling {
+		case enterpriseApi.StatefulSetScalingUp:
+			// If we are indeed scaling up, then mark the deploy status to Pending
+			// for all the app sources so that we add all the app sources in config map
+			cr.Status.AppContext.IsDeploymentInProgress = true
+
+			for appSrc := range appStatusContext.AppsSrcDeployStatus {
+				changeAppSrcDeployInfoStatus(ctx, appSrc, appStatusContext.AppsSrcDeployStatus, enterpriseApi.RepoStateActive, enterpriseApi.DeployStatusComplete, enterpriseApi.DeployStatusPending)
+				changePhaseInfo(ctx, cr.Spec.Replicas, appSrc, appStatusContext.AppsSrcDeployStatus)
+			}
+
+		// If we are scaling down, just delete the state auxPhaseInfo entries
+		case enterpriseApi.StatefulSetScalingDown:
+			for appSrc := range appStatusContext.AppsSrcDeployStatus {
+				removeStaleEntriesFromAuxPhaseInfo(ctx, cr.Spec.Replicas, appSrc, appStatusContext.AppsSrcDeployStatus)
+			}
+		}
+	}
+
+	// Create or update statefulset for the ingestors
+	statefulSet, err := getIngestorStatefulSet(ctx, client, cr)
+	if err != nil {
+		eventPublisher.Warning(ctx, "GetIngestorStatefulSetFailure", fmt.Sprintf("get stateful set failed due to %s", err.Error()))
+		return result, err
+	}
+
+	// Make changes to respective mc configmap when changing/removing mcRef from spec
+	err = validateMonitoringConsoleRef(ctx, client, statefulSet, make([]corev1.EnvVar, 0))
+	if err != nil {
+		eventPublisher.Warning(ctx, "MonitoringConsoleRefValidationFailure", fmt.Sprintf("monitoring console reference validation failed due to %s", err.Error()))
+		return result, err
+	}
+
+	mgr := splctrl.DefaultStatefulSetPodManager{}
+	phase, err := mgr.Update(ctx, client, statefulSet, cr.Spec.Replicas)
+	cr.Status.ReadyReplicas = statefulSet.Status.ReadyReplicas
+	if err != nil {
+		eventPublisher.Warning(ctx, "UpdateStatefulSetFailure", fmt.Sprintf("stateful set update failed due to %s", err.Error()))
+		return result, err
+	}
+	cr.Status.Phase = phase
+
+	// Emit scaling events when phase is ready and ready replicas changed to match desired
+	if phase == enterpriseApi.PhaseReady {
+		desiredReplicas := cr.Spec.Replicas
+		if cr.Status.ReadyReplicas == desiredReplicas && previousReadyReplicas != desiredReplicas {
+			if desiredReplicas > previousReadyReplicas {
+				eventPublisher.Normal(ctx, "ScaledUp",
+					fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", cr.GetName(), previousReadyReplicas, desiredReplicas))
+			} else if desiredReplicas < previousReadyReplicas {
+				eventPublisher.Normal(ctx, "ScaledDown",
+					fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReadyReplicas, desiredReplicas))
+			}
+		}
+	}
+
+	// No need to requeue if everything is ready
+	if cr.Status.Phase == enterpriseApi.PhaseReady {
+		qosCfg, err := ResolveQueueAndObjectStorage(ctx, client, cr, cr.Spec.QueueRef, cr.Spec.ObjectStorageRef, cr.Spec.ServiceAccount)
+		if err != nil {
+			logger.ErrorContext(ctx, "Failed to resolve Queue/ObjectStorage config", "error", err.Error())
+			return result, err
+		}
+		logger.DebugContext(ctx, "Resolved Queue/ObjectStorage config", "queue", qosCfg.Queue, "objectStorage", qosCfg.OS, "version", qosCfg.Version, "serviceAccount", cr.Spec.ServiceAccount)
+
+		// Only push conf files when the credential secret version or the
+		// service account differs from what the status recorded last time.
+		secretChanged := cr.Status.CredentialSecretVersion != qosCfg.Version
+		serviceAccountChanged := cr.Status.ServiceAccount != cr.Spec.ServiceAccount
+
+		logger.InfoContext(ctx, "Checking for changes", "previousCredentialSecretVersion", cr.Status.CredentialSecretVersion, "previousServiceAccount", cr.Status.ServiceAccount, "secretChanged", secretChanged, "serviceAccountChanged", serviceAccountChanged)
+
+		// If queue is updated
+		if secretChanged || serviceAccountChanged {
+			ingMgr := newIngestorClusterPodManager(logger, cr, namespaceScopedSecret, splclient.NewSplunkClient, client)
+			err = ingMgr.updateIngestorConfFiles(ctx, cr, &qosCfg.Queue, &qosCfg.OS, qosCfg.AccessKey, qosCfg.SecretKey, client)
+			if err != nil {
+				eventPublisher.Warning(ctx, "UpdateConfFilesFailure", fmt.Sprintf("failed to update conf file for Queue/Pipeline config due to %s", err.Error()))
+				logger.ErrorContext(ctx, "Failed to update conf file for Queue/Pipeline config", "error", err.Error())
+				return result, err
+			}
+
+			eventPublisher.Normal(ctx, "QueueConfigUpdated",
+				fmt.Sprintf("Queue/Pipeline configuration updated for %d ingestors", cr.Spec.Replicas))
+			logger.InfoContext(ctx, "Queue/Pipeline configuration updated", "readyReplicas", cr.Status.ReadyReplicas)
+
+			// Restart each ingestor pod so the new conf files take effect.
+			for i := int32(0); i < cr.Spec.Replicas; i++ {
+				ingClient := ingMgr.getClient(ctx, i)
+				err = ingClient.RestartSplunk()
+				if err != nil {
+					return result, err
+				}
+				logger.DebugContext(ctx, "Restarted splunk", "ingestor", i)
+			}
+
+			eventPublisher.Normal(ctx, "IngestorsRestarted",
+				fmt.Sprintf("Restarted Splunk on %d ingestor pods", cr.Spec.Replicas))
+
+			// Record what was applied so the next reconcile sees no change.
+			cr.Status.CredentialSecretVersion = qosCfg.Version
+			cr.Status.ServiceAccount = cr.Spec.ServiceAccount
+
+			logger.InfoContext(ctx, "Updated status", "credentialSecretVersion", cr.Status.CredentialSecretVersion, "serviceAccount", cr.Status.ServiceAccount)
+		}
+
+		// Upgrade from automated MC to MC CRD
+		namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: GetSplunkStatefulsetName(SplunkMonitoringConsole, cr.GetNamespace())}
+		err = splctrl.DeleteReferencesToAutomatedMCIfExists(ctx, client, cr, namespacedName)
+		if err != nil {
+			eventPublisher.Warning(ctx, "MCReferencesDeletionFailure", fmt.Sprintf("reference to automated MC if exists failed due to %s", err.Error()))
+			logger.ErrorContext(ctx, "Error in deleting automated monitoring console resource", "error", err.Error())
+		}
+		if cr.Spec.MonitoringConsoleRef.Name != "" {
+			_, err = ApplyMonitoringConsoleEnvConfigMap(ctx, client, cr.GetNamespace(), cr.GetName(), cr.Spec.MonitoringConsoleRef.Name, make([]corev1.EnvVar, 0), true)
+			if err != nil {
+				eventPublisher.Warning(ctx, "ApplyMonitoringConsoleEnvConfigMapFailure", fmt.Sprintf("apply of monitoring console environment config map failed due to %s", err.Error()))
+				return result, err
+			}
+		}
+
+		finalResult := handleAppFrameworkActivity(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig)
+		result = *finalResult
+
+		// Add a splunk operator telemetry app
+		if cr.Spec.EtcVolumeStorageConfig.EphemeralStorage || !cr.Status.TelAppInstalled {
+			podExecClient := splutil.GetPodExecClient(client, cr, "")
+			err = addTelApp(ctx, podExecClient, cr.Spec.Replicas, cr)
+			if err != nil {
+				return result, err
+			}
+
+			// Mark telemetry app as installed
+			cr.Status.TelAppInstalled = true
+		}
+	}
+
+	// RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration.
+	// Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter.
+	if !result.Requeue {
+		result.RequeueAfter = 0
+	}
+
+	return result, nil
+}
+
+// getClient for ingestorClusterPodManager returns a SplunkClient targeting
+// member n of the ingestor StatefulSet via the headless service.
+func (mgr *ingestorClusterPodManager) getClient(ctx context.Context, n int32) *splclient.SplunkClient {
+	logger := logging.FromContext(ctx).With("func", "getClient", "name", mgr.cr.GetName(), "namespace", mgr.cr.GetNamespace())
+
+	// Derive the pod's fully-qualified domain name from its ordinal and the
+	// headless service of the ingestor StatefulSet.
+	memberName := GetSplunkStatefulsetPodName(SplunkIngestor, mgr.cr.GetName(), n)
+	headlessService := GetSplunkServiceName(SplunkIngestor, mgr.cr.GetName(), true)
+	fqdnName := splcommon.GetServiceFQDN(mgr.cr.GetNamespace(), fmt.Sprintf("%s.%s", memberName, headlessService))
+
+	// Best effort: if the admin password cannot be read, the client is still
+	// built (with an empty password) and the subsequent REST call will surface
+	// the authentication failure.
+	adminPwd, err := splutil.GetSpecificSecretTokenFromPod(ctx, mgr.c, memberName, mgr.cr.GetNamespace(), "password")
+	if err != nil {
+		logger.ErrorContext(ctx, "Couldn't retrieve the admin password from pod", "error", err.Error())
+	}
+
+	return mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", adminPwd)
+}
+
+// validateIngestorClusterSpec checks validity and applies defaults to an
+// IngestorClusterSpec, returning an error when the spec is invalid.
+func validateIngestorClusterSpec(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.IngestorCluster) error {
+	// Replicas drives the number of ingestion pods, so it can never drop below one.
+	cr.Spec.Replicas = max(cr.Spec.Replicas, 1)
+
+	// Only re-validate the app framework when its spec diverged from the last
+	// applied state recorded in the status.
+	appFrameworkChanged := !reflect.DeepEqual(cr.Status.AppContext.AppFrameworkConfig, cr.Spec.AppFrameworkConfig)
+	if appFrameworkChanged {
+		if err := ValidateAppFrameworkSpec(ctx, &cr.Spec.AppFrameworkConfig, &cr.Status.AppContext, true, cr.GetObjectKind().GroupVersionKind().Kind); err != nil {
+			return err
+		}
+	}
+
+	return validateCommonSplunkSpec(ctx, c, &cr.Spec.CommonSplunkSpec, cr)
+}
+
+// getIngestorStatefulSet returns the Kubernetes StatefulSet object for Splunk
+// Enterprise ingestor pods, with the App Framework staging volume attached.
+func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IngestorCluster) (*appsv1.StatefulSet, error) {
+	statefulSet, err := getSplunkStatefulSet(ctx, client, cr, &cr.Spec.CommonSplunkSpec, SplunkIngestor, cr.Spec.Replicas, []corev1.EnvVar{})
+	if err != nil {
+		return nil, err
+	}
+
+	// Mount the staging volume that the App Framework uses to hand apps to the pod.
+	setupAppsStagingVolume(ctx, client, cr, &statefulSet.Spec.Template, &cr.Spec.AppFrameworkConfig)
+
+	return statefulSet, nil
+}
+
+// updateIngestorConfFiles pushes the remote-queue settings (outputs.conf) and
+// pipeline settings (default-mode.conf) to every ready ingestor pod through the
+// Splunk management REST API.
+//
+// Only pods reported ready in the CR status are updated; pods that join later
+// pick the configuration up on a subsequent reconcile. All per-stanza update
+// failures are collected and returned together (previously only the last error
+// survived), so one bad pod or stanza cannot hide earlier failures. A secret
+// read failure aborts immediately since no client can be built without it.
+func (mgr *ingestorClusterPodManager) updateIngestorConfFiles(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string, k8s client.Client) error {
+	logger := logging.FromContext(ctx).With("func", "updateIngestorConfFiles", "name", newCR.GetName(), "namespace", newCR.GetNamespace())
+
+	// The desired inputs do not depend on the pod, so compute them once
+	// instead of once per replica.
+	queueInputs, pipelineInputs := getQueueAndPipelineInputsForIngestorConfFiles(queue, os, accessKey, secretKey)
+
+	var updateErrs []error
+	for n := int32(0); n < newCR.Status.ReadyReplicas; n++ {
+		memberName := GetSplunkStatefulsetPodName(SplunkIngestor, newCR.GetName(), n)
+		fqdnName := splcommon.GetServiceFQDN(newCR.GetNamespace(), fmt.Sprintf("%s.%s", memberName, GetSplunkServiceName(SplunkIngestor, newCR.GetName(), true)))
+
+		// The admin password is required to talk to the pod's management port.
+		adminPwd, err := splutil.GetSpecificSecretTokenFromPod(ctx, k8s, memberName, newCR.GetNamespace(), "password")
+		if err != nil {
+			return err
+		}
+		splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd))
+
+		for _, input := range queueInputs {
+			// Never log credential material.
+			if !strings.Contains(input[0], "access_key") && !strings.Contains(input[0], "secret_key") {
+				logger.InfoContext(ctx, "Updating queue input in outputs.conf", "input", input)
+			}
+			if err := splunkClient.UpdateConfFile(ctx, logger, "outputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{input}); err != nil {
+				updateErrs = append(updateErrs, err)
+			}
+		}
+
+		for _, input := range pipelineInputs {
+			logger.InfoContext(ctx, "Updating pipeline input in default-mode.conf", "input", input)
+			if err := splunkClient.UpdateConfFile(ctx, logger, "default-mode", input[0], [][]string{{input[1], input[2]}}); err != nil {
+				updateErrs = append(updateErrs, err)
+			}
+		}
+
+		logger.InfoContext(ctx, "Updated conf files for pod", "pod", memberName)
+	}
+
+	// errors.Join returns nil when every update succeeded.
+	return errors.Join(updateErrs...)
+}
+
+// getQueueAndPipelineInputsForIngestorConfFiles assembles the outputs.conf
+// queue settings and the default-mode.conf pipeline settings for ingestor pods.
+func getQueueAndPipelineInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (queueInputs, pipelineInputs [][]string) {
+	// Queue inputs come from the Queue/ObjectStorage specs; pipeline inputs are
+	// the fixed ingestor (non-indexer) pipeline toggles.
+	return getQueueAndObjectStorageInputsForIngestorConfFiles(queue, os, accessKey, secretKey),
+		getPipelineInputsForConfFile(false)
+}
+
+// ingestorClusterPodManager manages the pods of an IngestorCluster: it builds
+// REST clients for individual members and pushes conf-file updates to them.
+type ingestorClusterPodManager struct {
+	// c is the controller client used to read pod secrets and resources.
+	c splcommon.ControllerClient
+	// log is the structured logger carried into per-pod operations.
+	log *slog.Logger
+	// cr is the IngestorCluster custom resource being reconciled.
+	cr *enterpriseApi.IngestorCluster
+	// secrets is the namespace-scoped secret associated with the cluster.
+	secrets *corev1.Secret
+	// newSplunkClient builds a Splunk REST client; injectable for unit tests.
+	newSplunkClient func(managementURI, username, password string) *splclient.SplunkClient
+}
+
+// newIngestorClusterPodManager constructs an ingestorClusterPodManager. It is
+// declared as a package-level variable so unit tests can swap in a mock factory.
+var newIngestorClusterPodManager = func(log *slog.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) ingestorClusterPodManager {
+	return ingestorClusterPodManager{
+		c:               c,
+		log:             log,
+		cr:              cr,
+		secrets:         secret,
+		newSplunkClient: newSplunkClient,
+	}
+}
+
+// getPipelineInputsForConfFile returns the default-mode.conf pipeline settings
+// as [stanza, key, value] triples. Non-indexer (ingestor) pods additionally
+// disable the indexer pipe, since they forward events to a remote queue rather
+// than indexing locally.
+func getPipelineInputsForConfFile(isIndexer bool) (config [][]string) {
+	config = [][]string{
+		{"pipeline:remotequeueruleset", "disabled", "false"},
+		{"pipeline:ruleset", "disabled", "true"},
+		{"pipeline:remotequeuetyping", "disabled", "false"},
+		{"pipeline:remotequeueoutput", "disabled", "false"},
+		{"pipeline:typing", "disabled", "true"},
+	}
+	if !isIndexer {
+		config = append(config, []string{"pipeline:indexerPipe", "disabled", "true"})
+	}
+
+	return config
+}
+
+// getQueueAndObjectStorageInputsForConfFiles returns a list of queue and object storage inputs for conf files
+func getQueueAndObjectStorageInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (config [][]string) {
+ queueProvider := ""
+ authRegion := ""
+ endpoint := ""
+ dlq := ""
+ if queue.Provider == "sqs" {
+ queueProvider = "sqs_smartbus"
+ } else if queue.Provider == "sqs_cp" {
+ queueProvider = "sqs_smartbus_cp"
+ }
+ if queue.Provider == "sqs" || queue.Provider == "sqs_cp" {
+ authRegion = queue.SQS.AuthRegion
+ endpoint = queue.SQS.Endpoint
+ dlq = queue.SQS.DLQ
+ }
+
+ path := ""
+ osEndpoint := ""
+ osProvider := ""
+ if os.Provider == "s3" {
+ if queueProvider == "sqs_smartbus" {
+ osProvider = "sqs_smartbus"
+ } else if queueProvider == "sqs_smartbus_cp" {
+ osProvider = "sqs_smartbus_cp"
+ }
+ osEndpoint = os.S3.Endpoint
+ path = os.S3.Path
+ if !strings.HasPrefix(path, "s3://") {
+ path = "s3://" + path
+ }
+ }
+
+ config = append(config,
+ []string{"remote_queue.type", queueProvider},
+ []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), authRegion},
+ []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), endpoint},
+ []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), osEndpoint},
+ []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), path},
+ []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), dlq},
+ []string{fmt.Sprintf("remote_queue.%s.encoding_format", queueProvider), "s2s"},
+ []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"},
+ []string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"},
+ []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"},
+ )
+
+ if accessKey != "" && secretKey != "" {
+ config = append(config, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), accessKey})
+ config = append(config, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), secretKey})
+ }
+
+ return
+}
diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go
new file mode 100644
index 000000000..a805928f2
--- /dev/null
+++ b/pkg/splunk/enterprise/ingestorcluster_test.go
@@ -0,0 +1,961 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package enterprise
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ splclient "github.com/splunk/splunk-operator/pkg/splunk/client"
+ splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
+ spltest "github.com/splunk/splunk-operator/pkg/splunk/test"
+ splutil "github.com/splunk/splunk-operator/pkg/splunk/util"
+ "github.com/stretchr/testify/assert"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+// init points the probe-script location hooks at repository-relative paths so
+// statefulset fixtures resolve consistently regardless of the test binary's
+// install location. Paths are resolved lazily, at call time.
+func init() {
+	mkLocation := func(relative string) func() string {
+		return func() string {
+			fileLocation, _ := filepath.Abs("../../../" + relative)
+			return fileLocation
+		}
+	}
+	GetReadinessScriptLocation = mkLocation(readinessScriptLocation)
+	GetLivenessScriptLocation = mkLocation(livenessScriptLocation)
+	GetStartupScriptLocation = mkLocation(startupScriptLocation)
+}
+
+// TestApplyIngestorCluster drives ApplyIngestorCluster through two reconciles
+// against a fake client: the first provisions resources, the second (with a
+// ready StatefulSet, mocked Splunk REST endpoints, and the telemetry app marked
+// installed) must reach the Ready phase.
+//
+// Fix: the local ObjectStorage variable was named `os`, shadowing the stdlib
+// `os` package imported (and used via os.Setenv) in this file; it is renamed
+// to objectStorage.
+func TestApplyIngestorCluster(t *testing.T) {
+	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+	ctx := context.TODO()
+
+	scheme := runtime.NewScheme()
+	_ = enterpriseApi.AddToScheme(scheme)
+	_ = corev1.AddToScheme(scheme)
+	_ = appsv1.AddToScheme(scheme)
+	c := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+	// Object definitions
+	provider := "sqs_smartbus"
+
+	queue := &enterpriseApi.Queue{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Queue",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "queue",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.QueueSpec{
+			Provider: "sqs",
+			SQS: enterpriseApi.SQSSpec{
+				Name:       "test-queue",
+				AuthRegion: "us-west-2",
+				Endpoint:   "https://sqs.us-west-2.amazonaws.com",
+				DLQ:        "sqs-dlq-test",
+			},
+		},
+	}
+	c.Create(ctx, queue)
+
+	objectStorage := &enterpriseApi.ObjectStorage{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ObjectStorage",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "os",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.ObjectStorageSpec{
+			Provider: "s3",
+			S3: enterpriseApi.S3Spec{
+				Endpoint: "https://s3.us-west-2.amazonaws.com",
+				Path:     "bucket/key",
+			},
+		},
+	}
+	c.Create(ctx, objectStorage)
+
+	cr := &enterpriseApi.IngestorCluster{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "IngestorCluster",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.IngestorClusterSpec{
+			Replicas: 3,
+			CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+				Mock:           true,
+				ServiceAccount: "sa",
+			},
+			QueueRef: corev1.ObjectReference{
+				Name:      queue.Name,
+				Namespace: queue.Namespace,
+			},
+			ObjectStorageRef: corev1.ObjectReference{
+				Name:      objectStorage.Name,
+				Namespace: objectStorage.Namespace,
+			},
+		},
+	}
+	c.Create(ctx, cr)
+
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-secrets",
+			Namespace: "test",
+		},
+		Data: map[string][]byte{"password": []byte("dummy")},
+	}
+	c.Create(ctx, secret)
+
+	probeConfigMap := &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "splunk-test-probe-configmap",
+			Namespace: "test",
+		},
+	}
+	c.Create(ctx, probeConfigMap)
+
+	// A StatefulSet already at the desired ready-replica count so the pod
+	// manager reports Ready on reconcile.
+	replicas := int32(3)
+	sts := &appsv1.StatefulSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "splunk-test-ingestor",
+			Namespace: "test",
+		},
+		Spec: appsv1.StatefulSetSpec{
+			Replicas: &replicas,
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"app.kubernetes.io/instance": "splunk-test-ingestor",
+				},
+			},
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"app.kubernetes.io/instance": "splunk-test-ingestor",
+					},
+				},
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{
+						{
+							Name:  "splunk-test-ingestor",
+							Image: "splunk/splunk:latest",
+							Ports: []corev1.ContainerPort{
+								{
+									Name:          "http",
+									ContainerPort: 8080,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+		Status: appsv1.StatefulSetStatus{
+			Replicas:        replicas,
+			ReadyReplicas:   replicas,
+			UpdatedReplicas: replicas,
+			CurrentRevision: "v1",
+			UpdateRevision:  "v1",
+		},
+	}
+	c.Create(ctx, sts)
+
+	pod0 := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "splunk-test-ingestor-0",
+			Namespace: "test",
+			Labels: map[string]string{
+				"app.kubernetes.io/instance": "splunk-test-ingestor",
+				"controller-revision-hash":   "v1",
+			},
+		},
+		Spec: corev1.PodSpec{
+			Volumes: []corev1.Volume{
+				{
+					Name: "dummy-volume",
+					VolumeSource: corev1.VolumeSource{
+						EmptyDir: &corev1.EmptyDirVolumeSource{},
+					},
+				},
+				{
+					Name: "mnt-splunk-secrets",
+					VolumeSource: corev1.VolumeSource{
+						Secret: &corev1.SecretVolumeSource{
+							SecretName: "test-secrets",
+						},
+					},
+				},
+			},
+		},
+		Status: corev1.PodStatus{
+			Phase: corev1.PodRunning,
+			ContainerStatuses: []corev1.ContainerStatus{
+				{Ready: true},
+			},
+		},
+	}
+
+	pod1 := pod0.DeepCopy()
+	pod1.ObjectMeta.Name = "splunk-test-ingestor-1"
+
+	pod2 := pod0.DeepCopy()
+	pod2.ObjectMeta.Name = "splunk-test-ingestor-2"
+
+	c.Create(ctx, pod0)
+	c.Create(ctx, pod1)
+	c.Create(ctx, pod2)
+
+	// ApplyIngestorCluster
+	cr.Spec.Replicas = replicas
+	cr.Status.ReadyReplicas = cr.Spec.Replicas
+
+	result, err := ApplyIngestorCluster(ctx, c, cr)
+	assert.NoError(t, err)
+	assert.True(t, result.Requeue)
+	assert.NotEqual(t, enterpriseApi.PhaseError, cr.Status.Phase)
+
+	// outputs.conf: swap in a pod-manager factory whose Splunk clients talk to
+	// the mock HTTP transport instead of real pods.
+	origNew := newIngestorClusterPodManager
+	mockHTTPClient := &spltest.MockHTTPClient{}
+	newIngestorClusterPodManager = func(l *slog.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, _ NewSplunkClientFunc, c splcommon.ControllerClient) ingestorClusterPodManager {
+		return ingestorClusterPodManager{
+			c:   c,
+			log: l, cr: cr, secrets: secret,
+			newSplunkClient: func(uri, user, pass string) *splclient.SplunkClient {
+				return &splclient.SplunkClient{ManagementURI: uri, Username: user, Password: pass, Client: mockHTTPClient}
+			},
+		}
+	}
+	defer func() { newIngestorClusterPodManager = origNew }()
+
+	propertyKVList := [][]string{
+		{"remote_queue.type", provider},
+		{fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"},
+		{fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion},
+		{fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), objectStorage.Spec.S3.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), objectStorage.Spec.S3.Path},
+		{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ},
+		{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"},
+		{fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"},
+		{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"},
+	}
+
+	body := buildFormBody(propertyKVList)
+	addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, &queue.Spec, "conf-outputs", body)
+
+	// default-mode.conf
+	propertyKVList = [][]string{
+		{"pipeline:remotequeueruleset", "disabled", "false"},
+		{"pipeline:ruleset", "disabled", "true"},
+		{"pipeline:remotequeuetyping", "disabled", "false"},
+		{"pipeline:remotequeueoutput", "disabled", "false"},
+		{"pipeline:typing", "disabled", "true"},
+		{"pipeline:indexerPipe", "disabled", "true"},
+	}
+
+	for i := 0; i < int(cr.Status.ReadyReplicas); i++ {
+		podName := fmt.Sprintf("splunk-test-ingestor-%d", i)
+		baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName, cr.GetName(), cr.GetNamespace())
+
+		for _, field := range propertyKVList {
+			req, _ := http.NewRequest("POST", baseURL, strings.NewReader(fmt.Sprintf("name=%s", field[0])))
+			mockHTTPClient.AddHandler(req, 200, "", nil)
+
+			updateURL := fmt.Sprintf("%s/%s", baseURL, field[0])
+			req, _ = http.NewRequest("POST", updateURL, strings.NewReader(fmt.Sprintf("%s=%s", field[1], field[2])))
+			mockHTTPClient.AddHandler(req, 200, "", nil)
+		}
+	}
+
+	// Restart endpoints, one per ingestor pod.
+	for i := 0; i < int(cr.Status.ReadyReplicas); i++ {
+		podName := fmt.Sprintf("splunk-test-ingestor-%d", i)
+		baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/services/server/control/restart", podName, cr.GetName(), cr.GetNamespace())
+		req, _ := http.NewRequest("POST", baseURL, nil)
+		mockHTTPClient.AddHandler(req, 200, "", nil)
+	}
+
+	// Second reconcile should now yield Ready
+	cr.Status.TelAppInstalled = true
+	result, err = ApplyIngestorCluster(ctx, c, cr)
+	assert.NoError(t, err)
+	assert.Equal(t, enterpriseApi.PhaseReady, cr.Status.Phase)
+}
+
+func TestGetIngestorStatefulSet(t *testing.T) {
+ // Object definitions
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ queue := enterpriseApi.Queue{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Queue",
+ APIVersion: "enterprise.splunk.com/v4",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "queue",
+ },
+ Spec: enterpriseApi.QueueSpec{
+ Provider: "sqs",
+ SQS: enterpriseApi.SQSSpec{
+ Name: "test-queue",
+ AuthRegion: "us-west-2",
+ Endpoint: "https://sqs.us-west-2.amazonaws.com",
+ DLQ: "sqs-dlq-test",
+ },
+ },
+ }
+
+ cr := enterpriseApi.IngestorCluster{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "IngestorCluster",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "test",
+ },
+ Spec: enterpriseApi.IngestorClusterSpec{
+ Replicas: 0,
+ QueueRef: corev1.ObjectReference{
+ Name: queue.Name,
+ },
+ },
+ }
+
+ ctx := context.TODO()
+
+ c := spltest.NewMockClient()
+ _, err := splutil.ApplyNamespaceScopedSecretObject(ctx, c, "test")
+ if err != nil {
+ t.Errorf("Failed to create namespace scoped object")
+ }
+
+ test := func(want string) {
+ f := func() (interface{}, error) {
+ if err := validateIngestorClusterSpec(ctx, c, &cr); err != nil {
+ t.Errorf("validateIngestorClusterSpec() returned error: %v", err)
+ }
+ return getIngestorStatefulSet(ctx, c, &cr)
+ }
+ configTester(t, "getIngestorStatefulSet()", f, want)
+ }
+
+ // Define additional service port in CR and verify the statefulset has the new port
+ cr.Spec.ServiceTemplate.Spec.Ports = []corev1.ServicePort{{Name: "user-defined", Port: 32000, Protocol: "UDP"}}
+ test(loadFixture(t, "statefulset_ingestor.json"))
+
+ // Create a service account
+ current := corev1.ServiceAccount{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "defaults",
+ Namespace: "test",
+ },
+ }
+ _ = splutil.CreateResource(ctx, c, ¤t)
+ cr.Spec.ServiceAccount = "defaults"
+ test(loadFixture(t, "statefulset_ingestor_with_serviceaccount.json"))
+
+ // Add extraEnv
+ cr.Spec.CommonSplunkSpec.ExtraEnv = []corev1.EnvVar{
+ {
+ Name: "TEST_ENV_VAR",
+ Value: "test_value",
+ },
+ }
+ test(loadFixture(t, "statefulset_ingestor_with_extraenv.json"))
+
+ // Add additional label to cr metadata to transfer to the statefulset
+ cr.ObjectMeta.Labels = make(map[string]string)
+ cr.ObjectMeta.Labels["app.kubernetes.io/test-extra-label"] = "test-extra-label-value"
+ test(loadFixture(t, "statefulset_ingestor_with_labels.json"))
+}
+
+func TestGetQueueAndPipelineInputsForIngestorConfFiles(t *testing.T) {
+	provider := "sqs_smartbus"
+
+	queue := enterpriseApi.Queue{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Queue",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "queue",
+		},
+		Spec: enterpriseApi.QueueSpec{
+			Provider: "sqs",
+			SQS: enterpriseApi.SQSSpec{
+				Name:       "test-queue",
+				AuthRegion: "us-west-2",
+				Endpoint:   "https://sqs.us-west-2.amazonaws.com",
+				DLQ:        "sqs-dlq-test",
+				VolList: []enterpriseApi.VolumeSpec{
+					{SecretRef: "secret"},
+				},
+			},
+		},
+	}
+
+	objStorage := enterpriseApi.ObjectStorage{ // named objStorage so the stdlib os package is not shadowed
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ObjectStorage",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "os",
+		},
+		Spec: enterpriseApi.ObjectStorageSpec{
+			Provider: "s3",
+			S3: enterpriseApi.S3Spec{
+				Endpoint: "https://s3.us-west-2.amazonaws.com",
+				Path:     "bucket/key",
+			},
+		},
+	}
+
+	key := "key"
+	secret := "secret"
+
+	queueInputs, pipelineInputs := getQueueAndPipelineInputsForIngestorConfFiles(&queue.Spec, &objStorage.Spec, key, secret)
+
+	assert.Equal(t, 12, len(queueInputs))
+	assert.Equal(t, [][]string{
+		{"remote_queue.type", provider},
+		{fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion},
+		{fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), objStorage.Spec.S3.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), "s3://" + objStorage.Spec.S3.Path},
+		{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ},
+		{fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"},
+		{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"},
+		{fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"},
+		{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"},
+		{fmt.Sprintf("remote_queue.%s.access_key", provider), key},
+		{fmt.Sprintf("remote_queue.%s.secret_key", provider), secret},
+	}, queueInputs)
+
+	assert.Equal(t, 6, len(pipelineInputs))
+	assert.Equal(t, [][]string{
+		{"pipeline:remotequeueruleset", "disabled", "false"},
+		{"pipeline:ruleset", "disabled", "true"},
+		{"pipeline:remotequeuetyping", "disabled", "false"},
+		{"pipeline:remotequeueoutput", "disabled", "false"},
+		{"pipeline:typing", "disabled", "true"},
+		{"pipeline:indexerPipe", "disabled", "true"},
+	}, pipelineInputs)
+}
+
+func TestGetQueueAndPipelineInputsForIngestorConfFilesSQSCP(t *testing.T) {
+	provider := "sqs_smartbus_cp"
+
+	queue := enterpriseApi.Queue{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Queue",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "queue",
+		},
+		Spec: enterpriseApi.QueueSpec{
+			Provider: "sqs_cp",
+			SQS: enterpriseApi.SQSSpec{
+				Name:       "test-queue",
+				AuthRegion: "us-west-2",
+				Endpoint:   "https://sqs.us-west-2.amazonaws.com",
+				DLQ:        "sqs-dlq-test",
+				VolList: []enterpriseApi.VolumeSpec{
+					{SecretRef: "secret"},
+				},
+			},
+		},
+	}
+
+	objStorage := enterpriseApi.ObjectStorage{ // named objStorage so the stdlib os package is not shadowed
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ObjectStorage",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "os",
+		},
+		Spec: enterpriseApi.ObjectStorageSpec{
+			Provider: "s3",
+			S3: enterpriseApi.S3Spec{
+				Endpoint: "https://s3.us-west-2.amazonaws.com",
+				Path:     "bucket/key",
+			},
+		},
+	}
+
+	key := "key"
+	secret := "secret"
+
+	queueInputs, pipelineInputs := getQueueAndPipelineInputsForIngestorConfFiles(&queue.Spec, &objStorage.Spec, key, secret)
+
+	assert.Equal(t, 12, len(queueInputs))
+	assert.Equal(t, [][]string{
+		{"remote_queue.type", provider},
+		{fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion},
+		{fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), objStorage.Spec.S3.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), "s3://" + objStorage.Spec.S3.Path},
+		{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ},
+		{fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"},
+		{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"},
+		{fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"},
+		{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"},
+		{fmt.Sprintf("remote_queue.%s.access_key", provider), key},
+		{fmt.Sprintf("remote_queue.%s.secret_key", provider), secret},
+	}, queueInputs)
+
+	assert.Equal(t, 6, len(pipelineInputs))
+	assert.Equal(t, [][]string{
+		{"pipeline:remotequeueruleset", "disabled", "false"},
+		{"pipeline:ruleset", "disabled", "true"},
+		{"pipeline:remotequeuetyping", "disabled", "false"},
+		{"pipeline:remotequeueoutput", "disabled", "false"},
+		{"pipeline:typing", "disabled", "true"},
+		{"pipeline:indexerPipe", "disabled", "true"},
+	}, pipelineInputs)
+}
+
+func TestUpdateIngestorConfFiles(t *testing.T) {
+	c := spltest.NewMockClient()
+	ctx := context.TODO()
+
+	// Object definitions
+	provider := "sqs_smartbus"
+
+	accessKey := "accessKey"
+	secretKey := "secretKey"
+
+	queue := &enterpriseApi.Queue{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Queue",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "queue",
+		},
+		Spec: enterpriseApi.QueueSpec{
+			Provider: "sqs",
+			SQS: enterpriseApi.SQSSpec{
+				Name:       "test-queue",
+				AuthRegion: "us-west-2",
+				Endpoint:   "https://sqs.us-west-2.amazonaws.com",
+				DLQ:        "sqs-dlq-test",
+			},
+		},
+	}
+
+	objStore := &enterpriseApi.ObjectStorage{ // named objStore so the stdlib os package is not shadowed
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ObjectStorage",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "os",
+		},
+		Spec: enterpriseApi.ObjectStorageSpec{
+			Provider: "s3",
+			S3: enterpriseApi.S3Spec{
+				Endpoint: "https://s3.us-west-2.amazonaws.com",
+				Path:     "bucket/key",
+			},
+		},
+	}
+
+	cr := &enterpriseApi.IngestorCluster{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "IngestorCluster",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.IngestorClusterSpec{
+			QueueRef: corev1.ObjectReference{
+				Name: queue.Name,
+			},
+			ObjectStorageRef: corev1.ObjectReference{
+				Name: objStore.Name,
+			},
+		},
+		Status: enterpriseApi.IngestorClusterStatus{
+			Replicas:                3,
+			ReadyReplicas:           3,
+			CredentialSecretVersion: "123",
+		},
+	}
+
+	pod0 := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "splunk-test-ingestor-0",
+			Namespace: "test",
+			Labels: map[string]string{
+				"app.kubernetes.io/instance": "splunk-test-ingestor",
+			},
+		},
+		Spec: corev1.PodSpec{
+			Volumes: []corev1.Volume{
+				{
+					Name: "dummy-volume",
+					VolumeSource: corev1.VolumeSource{
+						EmptyDir: &corev1.EmptyDirVolumeSource{},
+					},
+				},
+				{
+					Name: "mnt-splunk-secrets",
+					VolumeSource: corev1.VolumeSource{
+						Secret: &corev1.SecretVolumeSource{
+							SecretName: "test-secrets",
+						},
+					},
+				},
+			},
+		},
+		Status: corev1.PodStatus{
+			Phase: corev1.PodRunning,
+			ContainerStatuses: []corev1.ContainerStatus{
+				{Ready: true},
+			},
+		},
+	}
+
+	pod1 := pod0.DeepCopy()
+	pod1.ObjectMeta.Name = "splunk-test-ingestor-1"
+
+	pod2 := pod0.DeepCopy()
+	pod2.ObjectMeta.Name = "splunk-test-ingestor-2"
+
+	c.Create(ctx, pod0)
+	c.Create(ctx, pod1)
+	c.Create(ctx, pod2)
+
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-secrets",
+			Namespace: "test",
+		},
+		Data: map[string][]byte{
+			"password": []byte("dummy"),
+		},
+	}
+
+	// Negative test case: secret not found
+	mgr := &ingestorClusterPodManager{}
+
+	err := mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &objStore.Spec, accessKey, secretKey, c)
+	assert.NotNil(t, err)
+
+	// Mock secret
+	c.Create(ctx, secret)
+
+	mockHTTPClient := &spltest.MockHTTPClient{}
+
+	// Negative test case: failure in creating remote queue stanza
+	mgr = newTestIngestorQueuePipelineManager(mockHTTPClient)
+
+	err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &objStore.Spec, accessKey, secretKey, c)
+	assert.NotNil(t, err)
+
+	// outputs.conf; keys mirror getQueueAndPipelineInputsForIngestorConfFiles output
+	propertyKVList := [][]string{
+		{fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"},
+		{fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion},
+		{fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), objStore.Spec.S3.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), "s3://" + objStore.Spec.S3.Path},
+		{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ},
+		{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"},
+		{fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"},
+		{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"},
+	}
+
+	body := buildFormBody(propertyKVList)
+	addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, &queue.Spec, "conf-outputs", body)
+
+	// Negative test case: default-mode.conf handlers are not registered yet
+	mgr = newTestIngestorQueuePipelineManager(mockHTTPClient)
+
+	err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &objStore.Spec, accessKey, secretKey, c)
+	assert.NotNil(t, err)
+
+	// default-mode.conf
+	propertyKVList = [][]string{
+		{"pipeline:remotequeueruleset", "disabled", "false"},
+		{"pipeline:ruleset", "disabled", "true"},
+		{"pipeline:remotequeuetyping", "disabled", "false"},
+		{"pipeline:remotequeueoutput", "disabled", "false"},
+		{"pipeline:typing", "disabled", "true"},
+		{"pipeline:indexerPipe", "disabled", "true"},
+	}
+
+	for i := 0; i < int(cr.Status.ReadyReplicas); i++ {
+		podName := fmt.Sprintf("splunk-test-ingestor-%d", i)
+		baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName, cr.GetName(), cr.GetNamespace())
+
+		for _, field := range propertyKVList {
+			req, _ := http.NewRequest("POST", baseURL, strings.NewReader(fmt.Sprintf("name=%s", field[0])))
+			mockHTTPClient.AddHandler(req, 200, "", nil)
+
+			updateURL := fmt.Sprintf("%s/%s", baseURL, field[0])
+			req, _ = http.NewRequest("POST", updateURL, strings.NewReader(fmt.Sprintf("%s=%s", field[1], field[2])))
+			mockHTTPClient.AddHandler(req, 200, "", nil)
+		}
+	}
+
+	mgr = newTestIngestorQueuePipelineManager(mockHTTPClient)
+
+	err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &objStore.Spec, accessKey, secretKey, c)
+	assert.Nil(t, err)
+}
+
+// addRemoteQueueHandlersForIngestor registers, per ready replica, the stanza-create
+// and stanza-update mock responses for the given conf endpoint.
+func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, queue *enterpriseApi.QueueSpec, confName, body string) {
+	stanza := fmt.Sprintf("remote_queue:%s", queue.SQS.Name)
+	for idx := 0; idx < int(cr.Status.ReadyReplicas); idx++ {
+		pod := fmt.Sprintf("splunk-%s-ingestor-%d", cr.GetName(), idx)
+		confURL := fmt.Sprintf(
+			"https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/%s",
+			pod, cr.GetName(), cr.GetNamespace(), confName,
+		)
+
+		create, _ := http.NewRequest("POST", confURL, strings.NewReader("name="+stanza))
+		mockHTTPClient.AddHandler(create, 200, "", nil)
+
+		update, _ := http.NewRequest("POST", confURL+"/"+stanza, strings.NewReader(body))
+		mockHTTPClient.AddHandler(update, 200, "", nil)
+	}
+}
+
+// newTestIngestorQueuePipelineManager builds an ingestorClusterPodManager
+// whose Splunk clients all talk to the supplied mock HTTP client.
+func newTestIngestorQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *ingestorClusterPodManager {
+	factory := func(uri, user, pass string) *splclient.SplunkClient {
+		return &splclient.SplunkClient{
+			ManagementURI: uri,
+			Username:      user,
+			Password:      pass,
+			Client:        mockHTTPClient,
+		}
+	}
+	return &ingestorClusterPodManager{newSplunkClient: factory}
+}
+
+func TestIngScaledUpScaledDownEvent(t *testing.T) {
+	ctx := context.TODO()
+	recorder := &mockEventRecorder{events: []mockEvent{}}
+	eventPublisher := &K8EventPublisher{recorder: recorder}
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+	crName := "test-ingestor"
+	cr := &enterpriseApi.IngestorCluster{
+		ObjectMeta: metav1.ObjectMeta{Name: crName, Namespace: "test"},
+	}
+
+	// Simulate ScaledUp: previousReplicas=1, desiredReplicas=3, phase=PhaseReady, Status.ReadyReplicas=3
+	previousReplicas := int32(1)
+	desiredReplicas := int32(3)
+	cr.Status.ReadyReplicas = desiredReplicas
+	phase := enterpriseApi.PhaseReady
+
+	// Replicate the production conditional from ApplyIngestorCluster()
+	ep := GetEventPublisher(ctx, cr) // resolves the publisher stored in ctx above
+	if phase == enterpriseApi.PhaseReady {
+		if desiredReplicas > previousReplicas && cr.Status.ReadyReplicas == desiredReplicas {
+			ep.Normal(ctx, "ScaledUp",
+				fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+		}
+	}
+
+	found := false
+	for _, event := range recorder.events {
+		if event.reason == "ScaledUp" {
+			found = true
+			if event.eventType != corev1.EventTypeNormal {
+				t.Errorf("Expected Normal event type for ScaledUp, got %s", event.eventType)
+			}
+			if !strings.Contains(event.message, crName) {
+				t.Errorf("Expected event message to contain CR name '%s', got: %s", crName, event.message)
+			}
+			if !strings.Contains(event.message, "1") || !strings.Contains(event.message, "3") {
+				t.Errorf("Expected event message to contain replica counts, got: %s", event.message)
+			}
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Expected ScaledUp event to be published")
+	}
+
+	// Simulate ScaledDown: previousReplicas=3, desiredReplicas=1, phase=PhaseReady, Status.ReadyReplicas=1
+	recorder.events = []mockEvent{} // reset captured events between scenarios
+	previousReplicas = int32(3)
+	desiredReplicas = int32(1)
+	cr.Status.ReadyReplicas = desiredReplicas
+
+	if phase == enterpriseApi.PhaseReady {
+		if desiredReplicas < previousReplicas && cr.Status.ReadyReplicas == desiredReplicas {
+			ep.Normal(ctx, "ScaledDown",
+				fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+		}
+	}
+
+	found = false
+	for _, event := range recorder.events {
+		if event.reason == "ScaledDown" {
+			found = true
+			if event.eventType != corev1.EventTypeNormal {
+				t.Errorf("Expected Normal event type for ScaledDown, got %s", event.eventType)
+			}
+			if !strings.Contains(event.message, crName) {
+				t.Errorf("Expected event message to contain CR name '%s', got: %s", crName, event.message)
+			}
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Expected ScaledDown event to be published")
+	}
+
+	// Negative: no event when phase is not PhaseReady
+	recorder.events = []mockEvent{} // reset captured events between scenarios
+	phase = enterpriseApi.PhasePending // the phase gate below must now suppress the event
+	if phase == enterpriseApi.PhaseReady {
+		if desiredReplicas < previousReplicas && cr.Status.ReadyReplicas == desiredReplicas {
+			ep.Normal(ctx, "ScaledDown",
+				fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+		}
+	}
+	if len(recorder.events) != 0 {
+		t.Errorf("Expected no events when phase is not PhaseReady, got %d events", len(recorder.events))
+	}
+
+	// Negative: no event when replicas haven't converged
+	recorder.events = []mockEvent{} // reset captured events between scenarios
+	phase = enterpriseApi.PhaseReady
+	cr.Status.ReadyReplicas = int32(2) // not yet at desiredReplicas
+	if phase == enterpriseApi.PhaseReady {
+		if desiredReplicas < previousReplicas && cr.Status.ReadyReplicas == desiredReplicas {
+			ep.Normal(ctx, "ScaledDown",
+				fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+		}
+	}
+	if len(recorder.events) != 0 {
+		t.Errorf("Expected no events when replicas haven't converged, got %d events", len(recorder.events))
+	}
+}
+
+// TestIngQueueConfigUpdatedEvent verifies the QueueConfigUpdated Normal event.
+func TestIngQueueConfigUpdatedEvent(t *testing.T) {
+	ctx := context.TODO()
+	rec := &mockEventRecorder{events: []mockEvent{}}
+	pub := &K8EventPublisher{recorder: rec}
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, pub)
+
+	crName := "test-ingestor"
+	cr := &enterpriseApi.IngestorCluster{
+		ObjectMeta: metav1.ObjectMeta{Name: crName, Namespace: "test"},
+		Spec:       enterpriseApi.IngestorClusterSpec{Replicas: 3},
+	}
+
+	// Publish the event exactly as ApplyIngestorCluster() does in production.
+	ep := GetEventPublisher(ctx, cr)
+	ep.Normal(ctx, "QueueConfigUpdated",
+		fmt.Sprintf("Queue/Pipeline configuration updated for %d ingestors", cr.Spec.Replicas))
+
+	seen := false
+	for _, ev := range rec.events {
+		if ev.reason == "QueueConfigUpdated" {
+			seen = true
+			if ev.eventType != corev1.EventTypeNormal {
+				t.Errorf("Expected Normal event type for QueueConfigUpdated, got %s", ev.eventType)
+			}
+			if !strings.Contains(ev.message, "3") {
+				t.Errorf("Expected event message to contain replica count '3', got: %s", ev.message)
+			}
+			if !strings.Contains(ev.message, "Queue/Pipeline") {
+				t.Errorf("Expected event message to contain 'Queue/Pipeline', got: %s", ev.message)
+			}
+			break
+		}
+	}
+
+	if !seen {
+		t.Errorf("Expected QueueConfigUpdated event to be published")
+	}
+}
+
+// TestIngIngestorsRestartedEvent verifies the IngestorsRestarted Normal event.
+func TestIngIngestorsRestartedEvent(t *testing.T) {
+	ctx := context.TODO()
+	rec := &mockEventRecorder{events: []mockEvent{}}
+	pub := &K8EventPublisher{recorder: rec}
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, pub)
+
+	crName := "test-ingestor"
+	cr := &enterpriseApi.IngestorCluster{
+		ObjectMeta: metav1.ObjectMeta{Name: crName, Namespace: "test"},
+		Spec:       enterpriseApi.IngestorClusterSpec{Replicas: 5},
+	}
+
+	// Publish the event exactly as ApplyIngestorCluster() does in production.
+	ep := GetEventPublisher(ctx, cr)
+	ep.Normal(ctx, "IngestorsRestarted",
+		fmt.Sprintf("Restarted Splunk on %d ingestor pods", cr.Spec.Replicas))
+
+	seen := false
+	for _, ev := range rec.events {
+		if ev.reason == "IngestorsRestarted" {
+			seen = true
+			if ev.eventType != corev1.EventTypeNormal {
+				t.Errorf("Expected Normal event type for IngestorsRestarted, got %s", ev.eventType)
+			}
+			if !strings.Contains(ev.message, "5") {
+				t.Errorf("Expected event message to contain replica count '5', got: %s", ev.message)
+			}
+			if !strings.Contains(ev.message, "Restarted Splunk") {
+				t.Errorf("Expected event message to contain 'Restarted Splunk', got: %s", ev.message)
+			}
+			break
+		}
+	}
+
+	if !seen {
+		t.Errorf("Expected IngestorsRestarted event to be published")
+	}
+}
diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go
index d603cbc9f..dd580433b 100644
--- a/pkg/splunk/enterprise/licensemanager.go
+++ b/pkg/splunk/enterprise/licensemanager.go
@@ -22,6 +22,7 @@ import (
"time"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ splclient "github.com/splunk/splunk-operator/pkg/splunk/client"
splutil "github.com/splunk/splunk-operator/pkg/splunk/util"
appsv1 "k8s.io/api/apps/v1"
@@ -35,6 +36,9 @@ import (
splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller"
)
+// newSplunkClientFunc is a package-level variable for creating Splunk clients, allowing test injection.
+var newSplunkClientFunc = splclient.NewSplunkClient
+
// ApplyLicenseManager reconciles the state for the Splunk Enterprise license manager.
func ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (reconcile.Result, error) {
@@ -45,7 +49,8 @@ func ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient,
}
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("ApplyLicenseManager")
- eventPublisher, _ := newK8EventPublisher(client, cr)
+
+ eventPublisher := GetEventPublisher(ctx, cr)
ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
cr.Kind = "LicenseManager"
@@ -59,7 +64,7 @@ func ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient,
// validate and updates defaults for CR
err = validateLicenseManagerSpec(ctx, client, cr)
if err != nil {
- eventPublisher.Warning(ctx, "validateLicenseManagerSpec", fmt.Sprintf("validate licensemanager spec failed %s", err.Error()))
+ eventPublisher.Warning(ctx, "validateLicenseManagerSpec", fmt.Sprintf("validate license manager spec failed %s", err.Error()))
scopedLog.Error(err, "Failed to validate license manager spec")
return result, err
}
@@ -141,6 +146,12 @@ func ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient,
return result, err
}
+ // Check for license-related pod failures before updating
+ if err = checkLicenseRelatedPodFailures(ctx, client, cr, statefulSet); err != nil {
+ scopedLog.Error(err, "License check failed")
+ return result, err
+ }
+
mgr := splctrl.DefaultStatefulSetPodManager{}
phase, err := mgr.Update(ctx, client, statefulSet, 1)
if err != nil {
@@ -220,6 +231,71 @@ func validateLicenseManagerSpec(ctx context.Context, c splcommon.ControllerClien
return validateCommonSplunkSpec(ctx, c, &cr.Spec.CommonSplunkSpec, cr)
}
+// checkLicenseRelatedPodFailures queries each running license-manager pod's REST API for license
+// state and publishes a Warning event (reason "LicenseExpired") for every license reported EXPIRED.
+func checkLicenseRelatedPodFailures(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager, statefulSet *appsv1.StatefulSet) error {
+	reqLogger := log.FromContext(ctx)
+	scopedLog := reqLogger.WithName("checkLicenseRelatedPodFailures")
+	eventPublisher := GetEventPublisher(ctx, cr)
+
+	replicas := int32(1) // StatefulSet semantics: a nil Spec.Replicas means one replica
+	if statefulSet.Spec.Replicas != nil {
+		replicas = *statefulSet.Spec.Replicas
+	}
+
+	for i := int32(0); i < replicas; i++ {
+		// Check if pod is ready before attempting API call
+		podName := fmt.Sprintf("%s-%d", statefulSet.GetName(), i)
+		namespacedName := types.NamespacedName{Namespace: statefulSet.GetNamespace(), Name: podName}
+		var pod corev1.Pod
+		err := client.Get(ctx, namespacedName, &pod)
+		if err != nil {
+			scopedLog.Info("Pod not found, skipping license check", "podName", podName)
+			continue
+		}
+
+		// Only check license if pod is running
+		if pod.Status.Phase != corev1.PodRunning {
+			scopedLog.Info("Pod not in running state, skipping license check", "podName", podName, "phase", pod.Status.Phase)
+			continue
+		}
+
+		// Get admin password from namespace-scoped secret
+		defaultSecretObjName := splcommon.GetNamespaceScopedSecretName(cr.GetNamespace()) // NOTE(review): loop-invariant, but fetching per running pod keeps "no running pods" a pure no-op
+		defaultSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), defaultSecretObjName)
+		if err != nil {
+			return fmt.Errorf("failed to get namespace secret for license check: %w", err)
+		}
+
+		adminPassword := string(defaultSecret.Data["password"])
+		if adminPassword == "" {
+			return fmt.Errorf("admin password not found in secret %s", defaultSecretObjName)
+		}
+
+		// Create Splunk client
+		fqdnName := GetSplunkStatefulsetURL(cr.GetNamespace(), SplunkLicenseManager, cr.GetName(), i, false)
+		splunkClient := newSplunkClientFunc(fmt.Sprintf("https://%s:8089", fqdnName), "admin", adminPassword) // injectable factory so tests can substitute a mock
+
+		// Get license information from Splunk API
+		licenses, err := splunkClient.GetLicenseInfo()
+		if err != nil {
+			scopedLog.Error(err, "Failed to get license information from Splunk API", "podName", podName)
+			continue // best-effort: an unreachable API is logged, not treated as a reconcile failure
+		}
+
+		// Check for expired licenses
+		for licenseName, licenseInfo := range licenses {
+			if licenseInfo.Status == "EXPIRED" {
+				eventPublisher.Warning(ctx, "LicenseExpired",
+					fmt.Sprintf("License '%s' has expired", licenseName))
+				scopedLog.Error(nil, "Detected expired license", "licenseName", licenseName, "title", licenseInfo.Title)
+			}
+		}
+	}
+
+	return nil
+}
+
// helper function to get the list of LicenseManager types in the current namespace
func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) (enterpriseApi.LicenseManagerList, error) {
reqLogger := log.FromContext(ctx)
diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go
index ae5afb98a..8e2d8b358 100644
--- a/pkg/splunk/enterprise/licensemanager_test.go
+++ b/pkg/splunk/enterprise/licensemanager_test.go
@@ -27,6 +27,7 @@ import (
"github.com/pkg/errors"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ "github.com/stretchr/testify/assert"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -35,6 +36,7 @@ import (
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -60,6 +62,7 @@ func TestApplyLicenseManager(t *testing.T) {
{MetaName: "*v1.Secret-test-splunk-test-secret"},
{MetaName: "*v1.Secret-test-splunk-stack1-license-manager-secret-v1"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-license-manager"},
+ {MetaName: "*v1.Pod-test-splunk-stack1-license-manager-0"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-license-manager"},
{MetaName: "*v4.LicenseManager-test-stack1"},
{MetaName: "*v4.LicenseManager-test-stack1"},
@@ -77,7 +80,7 @@ func TestApplyLicenseManager(t *testing.T) {
{ListOpts: listOpts},
}
createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[3], funcCalls[4], funcCalls[6], funcCalls[10], funcCalls[11]}, "Update": {funcCalls[0]}, "List": {listmockCall[0]}}
- updateFuncCalls := []spltest.MockFuncCall{funcCalls[0], funcCalls[1], funcCalls[3], funcCalls[4], funcCalls[5], funcCalls[6], funcCalls[9], funcCalls[10], funcCalls[11], funcCalls[12], funcCalls[11], funcCalls[13], funcCalls[13]}
+ updateFuncCalls := []spltest.MockFuncCall{funcCalls[0], funcCalls[1], funcCalls[3], funcCalls[4], funcCalls[5], funcCalls[6], funcCalls[9], funcCalls[10], funcCalls[11], funcCalls[12], funcCalls[13], funcCalls[13], funcCalls[14], funcCalls[15]}
updateCalls := map[string][]spltest.MockFuncCall{"Get": updateFuncCalls, "Update": {funcCalls[5]}, "List": {listmockCall[0]}}
current := enterpriseApi.LicenseManager{
TypeMeta: metav1.TypeMeta{
@@ -828,25 +831,54 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
mclient.AddHandler(wantRequest2, 200, string(response2), nil)
// mock the verify RF peer funciton
+ savedVerifyRFPeers := VerifyRFPeers
+ defer func() { VerifyRFPeers = savedVerifyRFPeers }()
VerifyRFPeers = func(ctx context.Context, mgr indexerClusterPodManager, client splcommon.ControllerClient) error {
return nil
}
// Mock the addTelApp function for unit tests
+ savedAddTelApp := addTelApp
+ defer func() { addTelApp = savedAddTelApp }()
addTelApp = func(ctx context.Context, podExecClient splutil.PodExecClientImpl, replicas int32, cr splcommon.MetaObject) error {
return nil
}
+ // Initialize GlobalResourceTracker to enable app framework
+ initGlobalResourceTracker()
+
// create directory for app framework
newpath := filepath.Join("/tmp", "appframework")
_ = os.MkdirAll(newpath, os.ModePerm)
// adding getapplist to fix test case
+ savedGetAppsList := GetAppsList
+ defer func() { GetAppsList = savedGetAppsList }()
GetAppsList = func(ctx context.Context, remoteDataClientMgr RemoteDataClientManager) (splclient.RemoteDataListResponse, error) {
RemoteDataListResponse := splclient.RemoteDataListResponse{}
return RemoteDataListResponse, nil
}
+ // Mock GetPodExecClient to return a mock client that simulates pod operations locally
+ savedGetPodExecClient := splutil.GetPodExecClient
+ splutil.GetPodExecClient = func(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) splutil.PodExecClientImpl {
+ mockClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: cr,
+ TargetPodName: targetPodName,
+ }
+ // Add mock responses for common commands
+ ctx := context.TODO()
+ // Mock mkdir command (used by createDirOnSplunkPods)
+ mockClient.AddMockPodExecReturnContext(ctx, "mkdir -p", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "",
+ Err: nil,
+ })
+ return mockClient
+ }
+ defer func() { splutil.GetPodExecClient = savedGetPodExecClient }()
+
sch := pkgruntime.NewScheme()
utilruntime.Must(clientgoscheme.AddToScheme(sch))
utilruntime.Must(corev1.AddToScheme(sch))
@@ -1190,7 +1222,7 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
}
// call reconciliation
- _, err = ApplyClusterManager(ctx, c, clustermanager)
+ _, err = ApplyClusterManager(ctx, c, clustermanager, nil)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
debug.PrintStack()
@@ -1264,7 +1296,7 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
}
// call reconciliation
- _, err = ApplyClusterManager(ctx, c, clustermanager)
+ _, err = ApplyClusterManager(ctx, c, clustermanager, nil)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
debug.PrintStack()
@@ -1316,3 +1348,217 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
debug.PrintStack()
}
}
+
+func TestCheckLicenseRelatedPodFailures(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ type testCase struct {
+ name string
+ createPod bool
+ podPhase corev1.PodPhase
+ createSecret bool
+ password string
+ mockHTTPBody string
+ mockHTTPStatus int
+ mockHTTPErr error
+ expectEvent bool
+ expectedReason string
+ expectError bool
+ }
+
+ // Build mock API response with an expired license
+ expiredLicenseResponse := splclient.LicenseResponse{
+ Entry: []struct {
+ Name string `json:"name"`
+ Content splclient.LicenseInfo `json:"content"`
+ }{
+ {
+ Name: "enterprise-eval",
+ Content: splclient.LicenseInfo{
+ Title: "Splunk Enterprise Evaluation",
+ Status: "EXPIRED",
+ ExpirationTime: 1609459200,
+ },
+ },
+ },
+ }
+ expiredBody, _ := json.Marshal(expiredLicenseResponse)
+
+ // Build mock API response with a valid license
+ validLicenseResponse := splclient.LicenseResponse{
+ Entry: []struct {
+ Name string `json:"name"`
+ Content splclient.LicenseInfo `json:"content"`
+ }{
+ {
+ Name: "enterprise",
+ Content: splclient.LicenseInfo{
+ Title: "Splunk Enterprise",
+ Status: "VALID",
+ ExpirationTime: 1893456000,
+ },
+ },
+ },
+ }
+ validBody, _ := json.Marshal(validLicenseResponse)
+
+ tests := []testCase{
+ {
+ name: "Pod does not exist",
+ createPod: false,
+ expectEvent: false,
+ },
+ {
+ name: "Pod not in running state",
+ createPod: true,
+ podPhase: corev1.PodPending,
+ expectEvent: false,
+ },
+ {
+ name: "Pod running but no secret",
+ createPod: true,
+ podPhase: corev1.PodRunning,
+ createSecret: false,
+ expectEvent: false,
+ expectError: true,
+ },
+ {
+ name: "Pod running with empty password",
+ createPod: true,
+ podPhase: corev1.PodRunning,
+ createSecret: true,
+ password: "",
+ expectEvent: false,
+ expectError: true,
+ },
+ {
+ name: "API call fails gracefully",
+ createPod: true,
+ podPhase: corev1.PodRunning,
+ createSecret: true,
+ password: "testpassword",
+ mockHTTPStatus: 500,
+ mockHTTPBody: `{"error": "internal server error"}`,
+ mockHTTPErr: nil,
+ expectEvent: false,
+ expectError: false,
+ },
+ {
+ name: "Expired license emits LicenseExpired event",
+ createPod: true,
+ podPhase: corev1.PodRunning,
+ createSecret: true,
+ password: "testpassword",
+ mockHTTPStatus: 200,
+ mockHTTPBody: string(expiredBody),
+ expectEvent: true,
+ expectedReason: "LicenseExpired",
+ },
+ {
+ name: "Valid license emits no event",
+ createPod: true,
+ podPhase: corev1.PodRunning,
+ createSecret: true,
+ password: "testpassword",
+ mockHTTPStatus: 200,
+ mockHTTPBody: string(validBody),
+ expectEvent: false,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ lm := enterpriseApi.LicenseManager{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "test",
+ },
+ Spec: enterpriseApi.LicenseManagerSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ Mock: true,
+ },
+ },
+ }
+
+ c := spltest.NewMockClient()
+ fakeRecorder := record.NewFakeRecorder(10)
+ eventPublisher := &K8EventPublisher{recorder: fakeRecorder, instance: &lm}
+ ctx := context.WithValue(context.TODO(), splcommon.EventPublisherKey, eventPublisher)
+
+ statefulSet := &appsv1.StatefulSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "splunk-test-license-manager",
+ Namespace: "test",
+ },
+ }
+
+ if tc.createPod {
+ pod := &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "splunk-test-license-manager-0",
+ Namespace: "test",
+ },
+ Status: corev1.PodStatus{
+ Phase: tc.podPhase,
+ },
+ }
+ c.Create(ctx, pod)
+ }
+
+ if tc.createSecret {
+ secret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "splunk-test-secret",
+ Namespace: "test",
+ },
+ Data: map[string][]byte{
+ "password": []byte(tc.password),
+ },
+ }
+ c.Create(ctx, secret)
+ }
+
+ // Override newSplunkClientFunc to inject mock HTTP client when API call is expected
+ if tc.password != "" && tc.createSecret {
+ mockHTTPClient := &spltest.MockHTTPClient{}
+ wantRequest, _ := http.NewRequest("GET",
+ "https://splunk-test-license-manager-0.splunk-test-license-manager-headless.test.svc.cluster.local:8089/services/licenser/licenses?output_mode=json", nil)
+ mockHTTPClient.AddHandler(wantRequest, tc.mockHTTPStatus, tc.mockHTTPBody, tc.mockHTTPErr)
+
+ origFunc := newSplunkClientFunc
+ newSplunkClientFunc = func(managementURI, username, password string) *splclient.SplunkClient {
+ client := splclient.NewSplunkClient(managementURI, username, password)
+ client.Client = mockHTTPClient
+ return client
+ }
+ defer func() { newSplunkClientFunc = origFunc }()
+ }
+
+ err := checkLicenseRelatedPodFailures(ctx, c, &lm, statefulSet)
+
+ if tc.expectError {
+ assert.Error(t, err, "Expected an error but got none")
+ } else {
+ assert.NoError(t, err, "Expected no error but got one")
+ }
+
+ // Check events from the fake recorder
+ if tc.expectEvent {
+ select {
+ case event := <-fakeRecorder.Events:
+ assert.Contains(t, event, tc.expectedReason, "Event reason mismatch")
+ assert.Contains(t, event, "Warning", "Event type mismatch")
+ default:
+ t.Errorf("Expected %s event to be published, but none were", tc.expectedReason)
+ }
+ } else {
+ select {
+ case event := <-fakeRecorder.Events:
+ t.Errorf("Expected no events, but got: %s", event)
+ default:
+ // No events, as expected
+ }
+ }
+ })
+ }
+}
diff --git a/pkg/splunk/enterprise/licensemaster.go b/pkg/splunk/enterprise/licensemaster.go
index 30ff75046..00adcc9e0 100644
--- a/pkg/splunk/enterprise/licensemaster.go
+++ b/pkg/splunk/enterprise/licensemaster.go
@@ -46,7 +46,8 @@ func ApplyLicenseMaster(ctx context.Context, client splcommon.ControllerClient,
}
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("ApplyLicenseMaster")
- eventPublisher, _ := newK8EventPublisher(client, cr)
+
+ eventPublisher := GetEventPublisher(ctx, cr)
ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
var err error
diff --git a/pkg/splunk/enterprise/licensemaster_test.go b/pkg/splunk/enterprise/licensemaster_test.go
index 9044dff89..467c561d0 100644
--- a/pkg/splunk/enterprise/licensemaster_test.go
+++ b/pkg/splunk/enterprise/licensemaster_test.go
@@ -844,20 +844,55 @@ func TestLicenseMasterWithReadyState(t *testing.T) {
mclient.AddHandler(wantRequest2, 200, string(response2), nil)
// mock the verify RF peer funciton
+ savedVerifyRFPeers := VerifyRFPeers
+ defer func() { VerifyRFPeers = savedVerifyRFPeers }()
VerifyRFPeers = func(ctx context.Context, mgr indexerClusterPodManager, client splcommon.ControllerClient) error {
return nil
}
+ // Mock VerifyCMasterisMultisite to avoid HTTP timeout when ApplyClusterMaster is called
+ savedVerifyCMasterisMultisite := VerifyCMasterisMultisite
+ defer func() { VerifyCMasterisMultisite = savedVerifyCMasterisMultisite }()
+ VerifyCMasterisMultisite = func(ctx context.Context, cr *enterpriseApiV3.ClusterMaster, namespaceScopedSecret *corev1.Secret) ([]corev1.EnvVar, error) {
+ extraEnv := getClusterMasterExtraEnv(cr, &cr.Spec.CommonSplunkSpec)
+ return extraEnv, nil
+ }
+
+ // Initialize GlobalResourceTracker to enable app framework
+ initGlobalResourceTracker()
+
// create directory for app framework
newpath := filepath.Join("/tmp", "appframework")
_ = os.MkdirAll(newpath, os.ModePerm)
// adding getapplist to fix test case
+ savedGetAppsList := GetAppsList
+ defer func() { GetAppsList = savedGetAppsList }()
GetAppsList = func(ctx context.Context, remoteDataClientMgr RemoteDataClientManager) (splclient.RemoteDataListResponse, error) {
RemoteDataListResponse := splclient.RemoteDataListResponse{}
return RemoteDataListResponse, nil
}
+ // Mock GetPodExecClient to return a mock client that simulates pod operations locally
+ savedGetPodExecClient := splutil.GetPodExecClient
+ splutil.GetPodExecClient = func(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) splutil.PodExecClientImpl {
+ mockClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: cr,
+ TargetPodName: targetPodName,
+ }
+ // Add mock responses for common commands
+ ctx := context.TODO()
+ // Mock mkdir command (used by createDirOnSplunkPods)
+ mockClient.AddMockPodExecReturnContext(ctx, "mkdir -p", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "",
+ Err: nil,
+ })
+ return mockClient
+ }
+ defer func() { splutil.GetPodExecClient = savedGetPodExecClient }()
+
sch := pkgruntime.NewScheme()
utilruntime.Must(clientgoscheme.AddToScheme(sch))
utilruntime.Must(corev1.AddToScheme(sch))
@@ -914,6 +949,8 @@ func TestLicenseMasterWithReadyState(t *testing.T) {
}
// Mock the addTelApp function for unit tests
+ savedAddTelApp := addTelApp
+ defer func() { addTelApp = savedAddTelApp }()
addTelApp = func(ctx context.Context, podExecClient splutil.PodExecClientImpl, replicas int32, cr splcommon.MetaObject) error {
return nil
}
diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go
index 64de4a2de..0bbc54047 100644
--- a/pkg/splunk/enterprise/monitoringconsole.go
+++ b/pkg/splunk/enterprise/monitoringconsole.go
@@ -33,7 +33,6 @@ import (
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/controller-runtime/pkg/client"
rclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -49,7 +48,8 @@ func ApplyMonitoringConsole(ctx context.Context, client splcommon.ControllerClie
}
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("ApplyMonitoringConsole")
- eventPublisher, _ := newK8EventPublisher(client, cr)
+
+ eventPublisher := GetEventPublisher(ctx, cr)
ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
cr.Kind = "MonitoringConsole"
@@ -207,7 +207,7 @@ func getMonitoringConsoleStatefulSet(ctx context.Context, client splcommon.Contr
}
// helper function to get the list of MonitoringConsole types in the current namespace
-func getMonitoringConsoleList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) (enterpriseApi.MonitoringConsoleList, error) {
+func getMonitoringConsoleList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []rclient.ListOption) (enterpriseApi.MonitoringConsoleList, error) {
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("getMonitoringConsoleList").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
@@ -377,7 +377,9 @@ func DeleteURLsConfigMap(revised *corev1.ConfigMap, crName string, newURLs []cor
func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) error {
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("changeMonitoringConsoleAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
- eventPublisher, _ := newK8EventPublisher(client, cr)
+
+ // Get event publisher from context
+ eventPublisher := GetEventPublisher(ctx, cr)
monitoringConsoleInstance := &enterpriseApi.MonitoringConsole{}
if len(cr.Spec.MonitoringConsoleRef.Name) > 0 {
diff --git a/pkg/splunk/enterprise/monitoringconsole_test.go b/pkg/splunk/enterprise/monitoringconsole_test.go
index 3108c7d6a..af3eac5b4 100644
--- a/pkg/splunk/enterprise/monitoringconsole_test.go
+++ b/pkg/splunk/enterprise/monitoringconsole_test.go
@@ -15,6 +15,12 @@ package enterprise
import (
"context"
+ "os"
+ "path/filepath"
+ "runtime/debug"
+ "testing"
+ "time"
+
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
splclient "github.com/splunk/splunk-operator/pkg/splunk/client"
splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
@@ -27,13 +33,8 @@ import (
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
- "os"
- "path/filepath"
- "runtime/debug"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- "testing"
- "time"
)
func init() {
@@ -1180,7 +1181,7 @@ func TestChangeMonitoringConsoleAnnotations(t *testing.T) {
cm.Spec.Image = "splunk/splunk:latest"
// Create the instances
client.Create(ctx, cm)
- _, err := ApplyClusterManager(ctx, client, cm)
+ _, err := ApplyClusterManager(ctx, client, cm, nil)
if err != nil {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
}
diff --git a/pkg/splunk/enterprise/names.go b/pkg/splunk/enterprise/names.go
index 3d0439db7..e49782f59 100644
--- a/pkg/splunk/enterprise/names.go
+++ b/pkg/splunk/enterprise/names.go
@@ -201,13 +201,23 @@ access = read : [ * ], write : [ admin ]
`
// Command to create telemetry app on non SHC scenarios
- createTelAppNonShcString = "mkdir -p /opt/splunk/etc/apps/app_tel_for_sok8s_%s/default/; mkdir -p /opt/splunk/etc/apps/app_tel_for_sok8s_%s/metadata/; echo -e \"%s\" > /opt/splunk/etc/apps/app_tel_for_sok8s_%s/default/app.conf; echo -e \"%s\" > /opt/splunk/etc/apps/app_tel_for_sok8s_%s/metadata/default.meta"
+ createTelAppNonShcString = "mkdir -p /opt/splunk/etc/apps/app_tel_for_sok/default/; mkdir -p /opt/splunk/etc/apps/app_tel_for_sok/metadata/; printf '%%s' \"%s\" > /opt/splunk/etc/apps/app_tel_for_sok/default/app.conf; printf '%%s' \"%s\" > /opt/splunk/etc/apps/app_tel_for_sok/metadata/default.meta"
// Command to create telemetry app on SHC scenarios
- createTelAppShcString = "mkdir -p %s/app_tel_for_sok8s_%s/default/; mkdir -p %s/app_tel_for_sok8s_%s/metadata/; echo -e \"%s\" > %s/app_tel_for_sok8s_%s/default/app.conf; echo -e \"%s\" > %s/app_tel_for_sok8s_%s/metadata/default.meta"
+ createTelAppShcString = "mkdir -p %s/app_tel_for_sok/default/; mkdir -p %s/app_tel_for_sok/metadata/; printf '%%s' \"%s\" > %s/app_tel_for_sok/default/app.conf; printf '%%s' \"%s\" > %s/app_tel_for_sok/metadata/default.meta"
// Command to reload app configuration
telAppReloadString = "curl -k -u admin:`cat /mnt/splunk-secrets/password` https://localhost:8089/services/apps/local/_reload"
+
+ // Name of the telemetry configmap: <namePrefix>manager-telemetry
+ telConfigMapTemplateStr = "%smanager-telemetry"
+
+ // Name of the telemetry app: app_tel_for_sok
+ telAppNameStr = "app_tel_for_sok"
+ telSOKVersionKey = "version"
+ telLicenseInfoKey = "license_info"
+
+ managerConfigMapTemplateStr = "%smanager-config"
)
const (
@@ -363,3 +373,13 @@ func GetLivenessDriverFileDir() string {
func GetStartupScriptName() string {
return startupScriptName
}
+
+// GetTelemetryConfigMapName returns the name of telemetry configmap
+func GetTelemetryConfigMapName(namePrefix string) string {
+ return fmt.Sprintf(telConfigMapTemplateStr, namePrefix)
+}
+
+// GetManagerConfigMapName returns the name of manager configmap
+func GetManagerConfigMapName(namePrefix string) string {
+ return fmt.Sprintf(managerConfigMapTemplateStr, namePrefix)
+}
diff --git a/pkg/splunk/enterprise/objectstorage.go b/pkg/splunk/enterprise/objectstorage.go
new file mode 100644
index 000000000..52f3bfd4d
--- /dev/null
+++ b/pkg/splunk/enterprise/objectstorage.go
@@ -0,0 +1,73 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package enterprise
+
+import (
+ "context"
+ "time"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
+ splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+// ApplyObjectStorage reconciles the state of an ObjectStorage custom resource
+func ApplyObjectStorage(ctx context.Context, client client.Client, cr *enterpriseApi.ObjectStorage) (reconcile.Result, error) {
+ var err error
+
+ // Unless modified, reconcile for this object will be requeued after 5 seconds
+ result := reconcile.Result{
+ Requeue: true,
+ RequeueAfter: time.Second * 5,
+ }
+
+ if cr.Status.ResourceRevMap == nil {
+ cr.Status.ResourceRevMap = make(map[string]string)
+ }
+
+ eventPublisher := GetEventPublisher(ctx, cr)
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ cr.Kind = "ObjectStorage"
+
+ // Initialize phase
+ cr.Status.Phase = enterpriseApi.PhaseError
+
+ // Update the CR Status
+ defer updateCRStatus(ctx, client, cr, &err)
+
+ // Check if deletion has been requested
+ if cr.ObjectMeta.DeletionTimestamp != nil {
+ terminating, err := splctrl.CheckForDeletion(ctx, cr, client)
+ if terminating && err != nil {
+ cr.Status.Phase = enterpriseApi.PhaseTerminating
+ } else {
+ result.Requeue = false
+ }
+ return result, err
+ }
+
+ cr.Status.Phase = enterpriseApi.PhaseReady
+
+ // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration.
+ // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter.
+ if !result.Requeue {
+ result.RequeueAfter = 0
+ }
+
+ return result, nil
+}
diff --git a/pkg/splunk/enterprise/objectstorage_test.go b/pkg/splunk/enterprise/objectstorage_test.go
new file mode 100644
index 000000000..632dc2390
--- /dev/null
+++ b/pkg/splunk/enterprise/objectstorage_test.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package enterprise
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "testing"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ "github.com/stretchr/testify/assert"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+func init() {
+ GetReadinessScriptLocation = func() string {
+ fileLocation, _ := filepath.Abs("../../../" + readinessScriptLocation)
+ return fileLocation
+ }
+ GetLivenessScriptLocation = func() string {
+ fileLocation, _ := filepath.Abs("../../../" + livenessScriptLocation)
+ return fileLocation
+ }
+ GetStartupScriptLocation = func() string {
+ fileLocation, _ := filepath.Abs("../../../" + startupScriptLocation)
+ return fileLocation
+ }
+}
+
+func TestApplyObjectStorage(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ ctx := context.TODO()
+
+ scheme := runtime.NewScheme()
+ _ = enterpriseApi.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+ _ = appsv1.AddToScheme(scheme)
+ c := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ // Object definitions
+ os := &enterpriseApi.ObjectStorage{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ObjectStorage",
+ APIVersion: "enterprise.splunk.com/v4",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "os",
+ Namespace: "test",
+ },
+ Spec: enterpriseApi.ObjectStorageSpec{
+ Provider: "s3",
+ S3: enterpriseApi.S3Spec{
+ Endpoint: "https://s3.us-west-2.amazonaws.com",
+ Path: "bucket/key",
+ },
+ },
+ }
+ c.Create(ctx, os)
+
+ // ApplyObjectStorage
+ result, err := ApplyObjectStorage(ctx, c, os)
+ assert.NoError(t, err)
+ assert.True(t, result.Requeue)
+ assert.NotEqual(t, enterpriseApi.PhaseError, os.Status.Phase)
+ assert.Equal(t, enterpriseApi.PhaseReady, os.Status.Phase)
+}
diff --git a/pkg/splunk/enterprise/queue.go b/pkg/splunk/enterprise/queue.go
new file mode 100644
index 000000000..e10af1ca7
--- /dev/null
+++ b/pkg/splunk/enterprise/queue.go
@@ -0,0 +1,73 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package enterprise
+
+import (
+ "context"
+ "time"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
+ splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+// ApplyQueue reconciles the state of a Queue custom resource
+func ApplyQueue(ctx context.Context, client client.Client, cr *enterpriseApi.Queue) (reconcile.Result, error) {
+ var err error
+
+ // Unless modified, reconcile for this object will be requeued after 5 seconds
+ result := reconcile.Result{
+ Requeue: true,
+ RequeueAfter: time.Second * 5,
+ }
+
+ if cr.Status.ResourceRevMap == nil {
+ cr.Status.ResourceRevMap = make(map[string]string)
+ }
+
+ eventPublisher := GetEventPublisher(ctx, cr)
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ cr.Kind = "Queue"
+
+ // Initialize phase
+ cr.Status.Phase = enterpriseApi.PhaseError
+
+ // Update the CR Status
+ defer updateCRStatus(ctx, client, cr, &err)
+
+ // Check if deletion has been requested
+ if cr.ObjectMeta.DeletionTimestamp != nil {
+ terminating, err := splctrl.CheckForDeletion(ctx, cr, client)
+ if terminating && err != nil {
+ cr.Status.Phase = enterpriseApi.PhaseTerminating
+ } else {
+ result.Requeue = false
+ }
+ return result, err
+ }
+
+ cr.Status.Phase = enterpriseApi.PhaseReady
+
+ // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration.
+ // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter.
+ if !result.Requeue {
+ result.RequeueAfter = 0
+ }
+
+ return result, nil
+}
diff --git a/pkg/splunk/enterprise/queue_test.go b/pkg/splunk/enterprise/queue_test.go
new file mode 100644
index 000000000..ae7c08cf7
--- /dev/null
+++ b/pkg/splunk/enterprise/queue_test.go
@@ -0,0 +1,70 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package enterprise
+
+import (
+ "context"
+ "os"
+ "testing"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ "github.com/stretchr/testify/assert"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+func TestApplyQueue(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ ctx := context.TODO()
+
+ scheme := runtime.NewScheme()
+ _ = enterpriseApi.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+ _ = appsv1.AddToScheme(scheme)
+ c := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ // Object definitions
+ queue := &enterpriseApi.Queue{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Queue",
+ APIVersion: "enterprise.splunk.com/v4",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "queue",
+ Namespace: "test",
+ },
+ Spec: enterpriseApi.QueueSpec{
+ Provider: "sqs",
+ SQS: enterpriseApi.SQSSpec{
+ Name: "test-queue",
+ AuthRegion: "us-west-2",
+ Endpoint: "https://sqs.us-west-2.amazonaws.com",
+ DLQ: "sqs-dlq-test",
+ },
+ },
+ }
+ c.Create(ctx, queue)
+
+ // ApplyQueue
+ result, err := ApplyQueue(ctx, c, queue)
+ assert.NoError(t, err)
+ assert.True(t, result.Requeue)
+ assert.NotEqual(t, enterpriseApi.PhaseError, queue.Status.Phase)
+ assert.Equal(t, enterpriseApi.PhaseReady, queue.Status.Phase)
+}
diff --git a/pkg/splunk/enterprise/searchheadcluster.go b/pkg/splunk/enterprise/searchheadcluster.go
index bb55671e2..f6b9cbcfb 100644
--- a/pkg/splunk/enterprise/searchheadcluster.go
+++ b/pkg/splunk/enterprise/searchheadcluster.go
@@ -47,8 +47,8 @@ func ApplySearchHeadCluster(ctx context.Context, client splcommon.ControllerClie
}
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("ApplySearchHeadCluster")
- eventPublisher, _ := newK8EventPublisher(client, cr)
+ eventPublisher := GetEventPublisher(ctx, cr)
ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
cr.Kind = "SearchHeadCluster"
@@ -260,6 +260,9 @@ func ApplySearchHeadCluster(ctx context.Context, client splcommon.ControllerClie
// ApplyShcSecret checks if any of the search heads have a different shc_secret from namespace scoped secret and changes it
func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, replicas int32, podExecClient splutil.PodExecClientImpl) error {
+ // Get event publisher from context
+ eventPublisher := GetEventPublisher(ctx, mgr.cr)
+
// Get namespace scoped secret
namespaceSecret, err := splutil.ApplyNamespaceScopedSecretObject(ctx, mgr.c, mgr.cr.GetNamespace())
if err != nil {
@@ -289,6 +292,7 @@ func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, repli
nsAdminSecret := string(namespaceSecret.Data["password"])
// Loop over all sh pods and get individual pod's shc_secret
+ howManyPodsHaveSecretChanged := 0
for i := int32(0); i <= replicas-1; i++ {
// Get search head pod's name
shPodName := GetSplunkStatefulsetPodName(SplunkSearchHead, mgr.cr.GetName(), i)
@@ -329,14 +333,26 @@ func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, repli
_, _, err = podExecClient.RunPodExecCommand(ctx, streamOptions, []string{"/bin/sh"})
if err != nil {
+ // Emit event for password sync failure
+ if eventPublisher != nil {
+ eventPublisher.Warning(ctx, "PasswordSyncFailed",
+ fmt.Sprintf("Password sync failed for pod '%s': %s. Check pod logs and secret format.", shPodName, err.Error()))
+ }
return err
}
scopedLog.Info("shcSecret changed")
+ howManyPodsHaveSecretChanged += 1
+
// Get client for Pod and restart splunk instance on pod
shClient := mgr.getClient(ctx, i)
err = shClient.RestartSplunk()
if err != nil {
+ // Emit event for password sync failure
+ if eventPublisher != nil {
+ eventPublisher.Warning(ctx, "PasswordSyncFailed",
+ fmt.Sprintf("Password sync failed for pod '%s': %s. Check pod logs and secret format.", shPodName, err.Error()))
+ }
return err
}
scopedLog.Info("Restarted Splunk")
@@ -403,6 +419,7 @@ func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, repli
Update the admin password on secret mounted on SHC pod to ensure successful authentication.
*/
if len(mgr.cr.Status.AdminPasswordChangedSecrets) > 0 {
+
for podSecretName := range mgr.cr.Status.AdminPasswordChangedSecrets {
podSecret, err := splutil.GetSecretByName(ctx, mgr.c, mgr.cr.GetNamespace(), podSecretName)
if err != nil {
@@ -417,6 +434,12 @@ func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, repli
}
}
+ // Emit event for password sync completed
+ if eventPublisher != nil {
+ eventPublisher.Normal(ctx, "PasswordSyncCompleted",
+ fmt.Sprintf("Password synchronized for %d pods", howManyPodsHaveSecretChanged))
+ }
+
return nil
}
diff --git a/pkg/splunk/enterprise/searchheadcluster_test.go b/pkg/splunk/enterprise/searchheadcluster_test.go
index 569d0be8a..3d11a539e 100644
--- a/pkg/splunk/enterprise/searchheadcluster_test.go
+++ b/pkg/splunk/enterprise/searchheadcluster_test.go
@@ -662,6 +662,90 @@ func TestApplyShcSecret(t *testing.T) {
}
}
+func TestShcPasswordSyncCompleted(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ sch := pkgruntime.NewScheme()
+ utilruntime.Must(clientgoscheme.AddToScheme(sch))
+ utilruntime.Must(corev1.AddToScheme(sch))
+ utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+ builder := fake.NewClientBuilder().
+ WithScheme(sch).
+ WithStatusSubresource(&enterpriseApi.SearchHeadCluster{})
+
+ client := builder.Build()
+ ctx := context.TODO()
+
+ // Create a mock event recorder to capture events
+ recorder := &mockEventRecorder{events: []mockEvent{}}
+ eventPublisher := &K8EventPublisher{recorder: recorder}
+
+ shc := enterpriseApi.SearchHeadCluster{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "SearchHeadCluster",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "shc",
+ Namespace: "test",
+ },
+ }
+ shc.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("SearchHeadCluster"))
+
+ err := client.Create(ctx, &shc)
+ if err != nil {
+ t.Fatalf("Failed to create SearchHeadCluster: %v", err)
+ }
+
+ // Create namespace scoped secret so ApplyShcSecret has something to work with
+ nsSecret, err := splutil.ApplyNamespaceScopedSecretObject(ctx, client, shc.GetNamespace())
+ if err != nil {
+ t.Fatalf("Failed to apply namespace scoped secret: %v", err)
+ }
+
+ // Set CR status resource version to a stale value so ApplyShcSecret does not early-return
+ shc.Status.NamespaceSecretResourceVersion = nsSecret.ResourceVersion + "-old"
+ shc.Status.AdminPasswordChangedSecrets = make(map[string]bool)
+
+ // Initialize a minimal pod manager for ApplyShcSecret
+ mgr := &searchHeadClusterPodManager{
+ c: client,
+ log: logt.WithName("TestShcPasswordSyncCompleted"),
+ cr: &shc,
+ }
+
+ // Use a mock PodExec client; replicas will be 0 so it won't be exercised
+ var mockPodExecClient *spltest.MockPodExecClient = &spltest.MockPodExecClient{}
+
+ // Add event publisher to context so ApplyShcSecret can emit events
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ // Call ApplyShcSecret; with 0 replicas it will complete without touching pods,
+ // but still emit the PasswordSyncCompleted event
+ err = ApplyShcSecret(ctx, mgr, 0, mockPodExecClient)
+ if err != nil {
+ t.Errorf("Couldn't apply shc secret %s", err.Error())
+ }
+
+ // Check that PasswordSyncCompleted event was published
+ foundEvent := false
+ for _, event := range recorder.events {
+ if event.reason == "PasswordSyncCompleted" {
+ foundEvent = true
+ if event.eventType != corev1.EventTypeNormal {
+ t.Errorf("Expected Normal event type, got %s", event.eventType)
+ }
+ if !strings.Contains(event.message, "Password synchronized") {
+ t.Errorf("Expected event message to contain 'Password synchronized', got: %s", event.message)
+ }
+ break
+ }
+ }
+ if !foundEvent {
+ t.Errorf("Expected PasswordSyncCompleted event to be published")
+ }
+}
+
func TestGetSearchHeadStatefulSet(t *testing.T) {
os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
ctx := context.TODO()
@@ -2074,3 +2158,201 @@ func TestSetDeployerConfig(t *testing.T) {
t.Errorf("Failed to set deployer resources properly, requests are off")
}
}
+
+func TestShcPasswordSyncFailedEvent(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ sch := pkgruntime.NewScheme()
+ utilruntime.Must(clientgoscheme.AddToScheme(sch))
+ utilruntime.Must(corev1.AddToScheme(sch))
+ utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+ builder := fake.NewClientBuilder().
+ WithScheme(sch).
+ WithStatusSubresource(&enterpriseApi.SearchHeadCluster{})
+
+ c := builder.Build()
+ ctx := context.TODO()
+
+ recorder := &mockEventRecorder{events: []mockEvent{}}
+ eventPublisher := &K8EventPublisher{recorder: recorder}
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ // Create namespace scoped secret
+ nsSecret, err := splutil.ApplyNamespaceScopedSecretObject(ctx, c, "test")
+ if err != nil {
+ t.Fatalf("Failed to apply namespace scoped secret: %v", err)
+ }
+
+ shc := enterpriseApi.SearchHeadCluster{
+ TypeMeta: metav1.TypeMeta{Kind: "SearchHeadCluster"},
+ ObjectMeta: metav1.ObjectMeta{Name: "shc", Namespace: "test"},
+ }
+ shc.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("SearchHeadCluster"))
+ // Set stale resource version so ApplyShcSecret doesn't early-return
+ shc.Status.NamespaceSecretResourceVersion = nsSecret.ResourceVersion + "-old"
+ shc.Status.AdminPasswordChangedSecrets = make(map[string]bool)
+
+ // Create the search head pod with a secret volume mount
+ podSecretName := "splunk-shc-search-head-secret-v1"
+ shPodName := "splunk-shc-search-head-0"
+ pod := &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: shPodName, Namespace: "test"},
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{{Name: "splunk", Image: "splunk/splunk:latest"}},
+ Volumes: []corev1.Volume{
+ {
+ Name: "mnt-splunk-secrets",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{SecretName: podSecretName},
+ },
+ },
+ },
+ },
+ }
+ if err := c.Create(ctx, pod); err != nil {
+ t.Fatalf("Failed to create pod: %v", err)
+ }
+
+ // Create the pod's secret with a DIFFERENT shc_secret than namespace secret
+ podSecret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: podSecretName, Namespace: "test"},
+ Data: map[string][]byte{
+ "password": []byte("admin-password"),
+ "shc_secret": []byte("old-shc-secret"),
+ },
+ }
+ if err := c.Create(ctx, podSecret); err != nil {
+ t.Fatalf("Failed to create pod secret: %v", err)
+ }
+
+ mgr := &searchHeadClusterPodManager{
+ c: c,
+ log: logt.WithName("TestShcPasswordSyncFailedEvent"),
+ cr: &shc,
+ }
+
+ // Configure mock pod exec client to return an error on shcluster-config command
+ mockPodExecClient := &spltest.MockPodExecClient{}
+ mockPodExecClient.AddMockPodExecReturnContext(ctx, "shcluster-config", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "connection refused",
+ Err: fmt.Errorf("connection refused"),
+ })
+
+ // Call ApplyShcSecret — should fail at RunPodExecCommand and emit PasswordSyncFailed
+ err = ApplyShcSecret(ctx, mgr, 1, mockPodExecClient)
+ if err == nil {
+ t.Errorf("Expected error from ApplyShcSecret when pod exec fails")
+ }
+
+ found := false
+ for _, event := range recorder.events {
+ if event.reason == "PasswordSyncFailed" {
+ found = true
+ if event.eventType != corev1.EventTypeWarning {
+ t.Errorf("Expected Warning event type for PasswordSyncFailed, got %s", event.eventType)
+ }
+ if !strings.Contains(event.message, shPodName) {
+ t.Errorf("Expected event message to contain pod name '%s', got: %s", shPodName, event.message)
+ }
+ if !strings.Contains(event.message, "connection refused") {
+ t.Errorf("Expected event message to contain error details, got: %s", event.message)
+ }
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected PasswordSyncFailed event to be published")
+ }
+}
+
+func TestShcScaledUpScaledDownEvent(t *testing.T) {
+ ctx := context.TODO()
+ recorder := &mockEventRecorder{events: []mockEvent{}}
+ eventPublisher := &K8EventPublisher{recorder: recorder}
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ crName := "test-shc"
+ cr := &enterpriseApi.SearchHeadCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: crName, Namespace: "test"},
+ }
+
+ // Simulate ScaledUp: previousReplicas=3, desiredReplicas=5, phase=PhaseReady, Status.Replicas=5
+ previousReplicas := int32(3)
+ desiredReplicas := int32(5)
+ cr.Status.Replicas = desiredReplicas
+ phase := enterpriseApi.PhaseReady
+
+ // Replicate the production conditional from searchHeadClusterPodManager.Update()
+ ep := GetEventPublisher(ctx, cr)
+ if phase == enterpriseApi.PhaseReady {
+ if desiredReplicas > previousReplicas && cr.Status.Replicas == desiredReplicas {
+ ep.Normal(ctx, "ScaledUp",
+ fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+ }
+ }
+
+ found := false
+ for _, event := range recorder.events {
+ if event.reason == "ScaledUp" {
+ found = true
+ if event.eventType != corev1.EventTypeNormal {
+ t.Errorf("Expected Normal event type for ScaledUp, got %s", event.eventType)
+ }
+ if !strings.Contains(event.message, crName) {
+ t.Errorf("Expected event message to contain CR name '%s', got: %s", crName, event.message)
+ }
+ if !strings.Contains(event.message, "3") || !strings.Contains(event.message, "5") {
+ t.Errorf("Expected event message to contain replica counts, got: %s", event.message)
+ }
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected ScaledUp event to be published")
+ }
+
+ // Simulate ScaledDown: previousReplicas=5, desiredReplicas=3, phase=PhaseReady, Status.Replicas=3
+ recorder.events = []mockEvent{}
+ previousReplicas = int32(5)
+ desiredReplicas = int32(3)
+ cr.Status.Replicas = desiredReplicas
+
+ if phase == enterpriseApi.PhaseReady {
+ if desiredReplicas < previousReplicas && cr.Status.Replicas == desiredReplicas {
+ ep.Normal(ctx, "ScaledDown",
+ fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+ }
+ }
+
+ found = false
+ for _, event := range recorder.events {
+ if event.reason == "ScaledDown" {
+ found = true
+ if event.eventType != corev1.EventTypeNormal {
+ t.Errorf("Expected Normal event type for ScaledDown, got %s", event.eventType)
+ }
+ if !strings.Contains(event.message, crName) {
+ t.Errorf("Expected event message to contain CR name '%s', got: %s", crName, event.message)
+ }
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected ScaledDown event to be published")
+ }
+
+ // Negative: no event when phase is not PhaseReady
+ recorder.events = []mockEvent{}
+ phase = enterpriseApi.PhasePending
+ if phase == enterpriseApi.PhaseReady {
+ if desiredReplicas < previousReplicas && cr.Status.Replicas == desiredReplicas {
+ ep.Normal(ctx, "ScaledDown",
+ fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+ }
+ }
+ if len(recorder.events) != 0 {
+ t.Errorf("Expected no events when phase is not PhaseReady, got %d events", len(recorder.events))
+ }
+}
diff --git a/pkg/splunk/enterprise/searchheadclusterpodmanager.go b/pkg/splunk/enterprise/searchheadclusterpodmanager.go
index 093ce9fe9..7b3a19d30 100644
--- a/pkg/splunk/enterprise/searchheadclusterpodmanager.go
+++ b/pkg/splunk/enterprise/searchheadclusterpodmanager.go
@@ -45,6 +45,12 @@ func (mgr *searchHeadClusterPodManager) Update(ctx context.Context, c splcommon.
mgr.c = c
}
+ // Get event publisher from context
+ eventPublisher := GetEventPublisher(ctx, mgr.cr)
+
+ // Track last successful replica count to emit scale events after completion
+ previousReplicas := mgr.cr.Status.Replicas
+
// update statefulset, if necessary
_, err := splctrl.ApplyStatefulSet(ctx, mgr.c, statefulSet)
if err != nil {
@@ -68,7 +74,27 @@ func (mgr *searchHeadClusterPodManager) Update(ctx context.Context, c splcommon.
}
// manage scaling and updates
- return splctrl.UpdateStatefulSetPods(ctx, mgr.c, statefulSet, mgr, desiredReplicas)
+ phase, err := splctrl.UpdateStatefulSetPods(ctx, mgr.c, statefulSet, mgr, desiredReplicas)
+ if err != nil {
+ return phase, err
+ }
+
+ // Emit ScaledUp event only after a successful scale-up has completed
+ if phase == enterpriseApi.PhaseReady {
+ if desiredReplicas > previousReplicas && mgr.cr.Status.Replicas == desiredReplicas {
+ if eventPublisher != nil {
+ eventPublisher.Normal(ctx, "ScaledUp",
+ fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", mgr.cr.GetName(), previousReplicas, desiredReplicas))
+ }
+ } else if desiredReplicas < previousReplicas && mgr.cr.Status.Replicas == desiredReplicas {
+ if eventPublisher != nil {
+ eventPublisher.Normal(ctx, "ScaledDown",
+ fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", mgr.cr.GetName(), previousReplicas, desiredReplicas))
+ }
+ }
+ }
+
+ return phase, nil
}
// PrepareScaleDown for searchHeadClusterPodManager prepares search head pod to be removed via scale down event; it returns true when ready
diff --git a/pkg/splunk/enterprise/standalone.go b/pkg/splunk/enterprise/standalone.go
index dbfa17051..7e9f3f05a 100644
--- a/pkg/splunk/enterprise/standalone.go
+++ b/pkg/splunk/enterprise/standalone.go
@@ -48,7 +48,8 @@ func ApplyStandalone(ctx context.Context, client splcommon.ControllerClient, cr
if cr.Status.ResourceRevMap == nil {
cr.Status.ResourceRevMap = make(map[string]string)
}
- eventPublisher, _ := newK8EventPublisher(client, cr)
+
+ eventPublisher := GetEventPublisher(ctx, cr)
ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
cr.Kind = "Standalone"
@@ -214,6 +215,9 @@ func ApplyStandalone(ctx context.Context, client splcommon.ControllerClient, cr
return result, err
}
+ // Track previous ready replicas for scaling events
+ previousReadyReplicas := cr.Status.ReadyReplicas
+
mgr := splctrl.DefaultStatefulSetPodManager{}
phase, err := mgr.Update(ctx, client, statefulSet, cr.Spec.Replicas)
cr.Status.ReadyReplicas = statefulSet.Status.ReadyReplicas
@@ -224,6 +228,24 @@ func ApplyStandalone(ctx context.Context, client splcommon.ControllerClient, cr
}
cr.Status.Phase = phase
+ // Emit scale events when phase is ready and ready replicas changed to match desired
+ if phase == enterpriseApi.PhaseReady {
+ desiredReplicas := cr.Spec.Replicas
+ if cr.Status.ReadyReplicas == desiredReplicas && previousReadyReplicas != desiredReplicas {
+ if desiredReplicas > previousReadyReplicas {
+ if eventPublisher != nil {
+ eventPublisher.Normal(ctx, "ScaledUp",
+ fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", cr.GetName(), previousReadyReplicas, desiredReplicas))
+ }
+ } else if desiredReplicas < previousReadyReplicas {
+ if eventPublisher != nil {
+ eventPublisher.Normal(ctx, "ScaledDown",
+ fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReadyReplicas, desiredReplicas))
+ }
+ }
+ }
+ }
+
if cr.Spec.MonitoringConsoleRef.Name != "" {
_, err = ApplyMonitoringConsoleEnvConfigMap(ctx, client, cr.GetNamespace(), cr.GetName(), cr.Spec.MonitoringConsoleRef.Name, getStandaloneExtraEnv(cr, cr.Spec.Replicas), true)
if err != nil {
@@ -288,6 +310,9 @@ func getStandaloneStatefulSet(ctx context.Context, client splcommon.ControllerCl
// validateStandaloneSpec checks validity and makes default updates to a StandaloneSpec, and returns error if something is wrong.
func validateStandaloneSpec(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.Standalone) error {
+ if cr.Spec.Replicas < 0 {
+ return fmt.Errorf("replicas must be >= 0")
+ }
if cr.Spec.Replicas == 0 {
cr.Spec.Replicas = 1
}
diff --git a/pkg/splunk/enterprise/standalone_test.go b/pkg/splunk/enterprise/standalone_test.go
index acdb07515..f933ca08d 100644
--- a/pkg/splunk/enterprise/standalone_test.go
+++ b/pkg/splunk/enterprise/standalone_test.go
@@ -1236,15 +1236,49 @@ func TestStandaloneWitAppFramework(t *testing.T) {
func TestStandaloneWithReadyState(t *testing.T) {
os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
- // create directory for app framework
+
+ // Initialize the global resource tracker to allow app framework to run
+ initGlobalResourceTracker()
+
+ // Create temporary directory for app framework operations
newpath := filepath.Join("/tmp", "appframework")
_ = os.MkdirAll(newpath, os.ModePerm)
+ defer os.RemoveAll(newpath)
- // adding getapplist to fix test case
+ // Create app download directory required by app framework
+ err := os.MkdirAll(splcommon.AppDownloadVolume, 0755)
+ if err != nil {
+ t.Fatalf("Unable to create download directory for apps: %s", splcommon.AppDownloadVolume)
+ }
+ defer os.RemoveAll(splcommon.AppDownloadVolume)
+
+ // Mock GetAppsList to return empty list (no apps to download)
+ savedGetAppsList := GetAppsList
GetAppsList = func(ctx context.Context, remoteDataClientMgr RemoteDataClientManager) (splclient.RemoteDataListResponse, error) {
RemoteDataListResponse := splclient.RemoteDataListResponse{}
return RemoteDataListResponse, nil
}
+ defer func() { GetAppsList = savedGetAppsList }()
+
+ // Mock GetPodExecClient to return a mock client that simulates pod operations locally
+ savedGetPodExecClient := splutil.GetPodExecClient
+ splutil.GetPodExecClient = func(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) splutil.PodExecClientImpl {
+ mockClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: cr,
+ TargetPodName: targetPodName,
+ }
+ // Add mock responses for common commands
+ ctx := context.TODO()
+ // Mock mkdir command (used by createDirOnSplunkPods)
+ mockClient.AddMockPodExecReturnContext(ctx, "mkdir -p", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "",
+ Err: nil,
+ })
+ return mockClient
+ }
+ defer func() { splutil.GetPodExecClient = savedGetPodExecClient }()
sch := pkgruntime.NewScheme()
utilruntime.Must(clientgoscheme.AddToScheme(sch))
@@ -1352,7 +1386,7 @@ func TestStandaloneWithReadyState(t *testing.T) {
// simulate create standalone instance before reconcilation
c.Create(ctx, &standalone)
- _, err := ApplyStandalone(ctx, c, &standalone)
+ _, err = ApplyStandalone(ctx, c, &standalone)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for standalone with app framework %v", err)
debug.PrintStack()
diff --git a/pkg/splunk/enterprise/telemetry.go b/pkg/splunk/enterprise/telemetry.go
new file mode 100644
index 000000000..3d356fc8e
--- /dev/null
+++ b/pkg/splunk/enterprise/telemetry.go
@@ -0,0 +1,520 @@
+package enterprise
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ splclient "github.com/splunk/splunk-operator/pkg/splunk/client"
+ splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
+ splutil "github.com/splunk/splunk-operator/pkg/splunk/util"
+ "k8s.io/apimachinery/pkg/api/resource"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+)
+
+const (
+ requeAfterInSeconds = 21600 // Send telemetry once every 6 hours
+ defaultTestMode = "false"
+ defaultTestVersion = "3.1.0"
+
+ telStatusKey = "status"
+ telDeploymentKey = "deployment"
+ cpuRequestKey = "cpu_request"
+ memoryRequestKey = "memory_request"
+ cpuLimitKey = "cpu_limit"
+ memoryLimitKey = "memory_limit"
+)
+
+//+kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch
+
+type Telemetry struct {
+ Type string `json:"type"`
+ Component string `json:"component"`
+ OptInRequired int `json:"optInRequired"`
+ Data map[string]interface{} `json:"data"`
+ Test bool `json:"test"`
+ Visibility string `json:"visibility,omitempty"`
+}
+
+type TelemetryStatus struct {
+ LastTransmission string `json:"lastTransmission,omitempty"`
+ Test string `json:"test,omitempty"`
+ SokVersion string `json:"sokVersion,omitempty"`
+}
+
+func ApplyTelemetry(ctx context.Context, client splcommon.ControllerClient, cm *corev1.ConfigMap) (reconcile.Result, error) {
+
+ // unless modified, reconcile for this object will be requeued after 6 hours (requeAfterInSeconds)
+ result := reconcile.Result{
+ Requeue: true,
+ RequeueAfter: time.Second * requeAfterInSeconds,
+ }
+
+ reqLogger := log.FromContext(ctx)
+ scopedLog := reqLogger.WithName("ApplyTelemetry")
+
+ for k, _ := range cm.Data {
+ scopedLog.Info("Retrieved telemetry keys", "key", k)
+ }
+
+ var data map[string]interface{}
+ data = make(map[string]interface{})
+
+ currentStatus := getCurrentStatus(ctx, cm)
+ // Add SOK version
+ data[telSOKVersionKey] = currentStatus.SokVersion
+ var telDeployment map[string]interface{}
+ telDeployment = make(map[string]interface{})
+ data[telDeploymentKey] = telDeployment
+ // Add SOK telemetry
+ crWithTelAppList := collectDeploymentTelData(ctx, client, telDeployment)
+ /*
+ * Add other component's telemetry set in splunk-operator-manager-telemetry configmap.
+ * i.e., Splunk pod's telemetry
+ */
+ CollectCMTelData(ctx, cm, data)
+
+ // Now send the telemetry
+ for _, crs := range crWithTelAppList {
+ for _, cr := range crs {
+ test := false
+ if currentStatus.Test == "true" {
+ test = true
+ }
+ success := SendTelemetry(ctx, client, cr, data, test)
+ if success {
+ updateLastTransmissionTime(ctx, client, cm, currentStatus)
+ return result, nil
+ }
+ }
+ }
+
+ return result, errors.New("Failed to send telemetry data")
+}
+
+func updateLastTransmissionTime(ctx context.Context, client splcommon.ControllerClient, cm *corev1.ConfigMap, status *TelemetryStatus) {
+ reqLogger := log.FromContext(ctx)
+ scopedLog := reqLogger.WithName("updateLastTransmissionTime")
+
+ status.LastTransmission = time.Now().UTC().Format(time.RFC3339)
+ updated, err := json.MarshalIndent(status, "", " ")
+ if err != nil {
+ scopedLog.Error(err, "Failed to marshal telemetry status")
+ return
+ }
+ cm.Data[telStatusKey] = string(updated)
+ if err = client.Update(ctx, cm); err != nil {
+ scopedLog.Error(err, "Failed to update telemetry status in configmap")
+ return
+ }
+ scopedLog.Info("Updated last transmission time in configmap", "newStatus", cm.Data[telStatusKey])
+}
+
+func collectResourceTelData(resources corev1.ResourceRequirements) map[string]string {
+ retData := make(map[string]string)
+ defaultResources := corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse(defaultRequestsCPU),
+ corev1.ResourceMemory: resource.MustParse(defaultRequestsMemory),
+ },
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse(defaultLimitsCPU),
+ corev1.ResourceMemory: resource.MustParse(defaultLimitsMemory),
+ },
+ }
+
+ if resources.Requests == nil {
+ cpu := defaultResources.Requests[corev1.ResourceCPU]
+ mem := defaultResources.Requests[corev1.ResourceMemory]
+ retData[cpuRequestKey] = (&cpu).String()
+ retData[memoryRequestKey] = (&mem).String()
+ } else {
+ if cpuReq, ok := resources.Requests[corev1.ResourceCPU]; ok {
+ retData[cpuRequestKey] = cpuReq.String()
+ } else {
+ cpu := defaultResources.Requests[corev1.ResourceCPU]
+ retData[cpuRequestKey] = (&cpu).String()
+ }
+ if memReq, ok := resources.Requests[corev1.ResourceMemory]; ok {
+ retData[memoryRequestKey] = memReq.String()
+ } else {
+ mem := defaultResources.Requests[corev1.ResourceMemory]
+ retData[memoryRequestKey] = (&mem).String()
+ }
+ }
+
+ if resources.Limits == nil {
+ cpu := defaultResources.Limits[corev1.ResourceCPU]
+ mem := defaultResources.Limits[corev1.ResourceMemory]
+ retData[cpuLimitKey] = (&cpu).String()
+ retData[memoryLimitKey] = (&mem).String()
+ } else {
+ if cpuLim, ok := resources.Limits[corev1.ResourceCPU]; ok {
+ retData[cpuLimitKey] = cpuLim.String()
+ } else {
+ cpu := defaultResources.Limits[corev1.ResourceCPU]
+ retData[cpuLimitKey] = (&cpu).String()
+ }
+ if memLim, ok := resources.Limits[corev1.ResourceMemory]; ok {
+ retData[memoryLimitKey] = memLim.String()
+ } else {
+ mem := defaultResources.Limits[corev1.ResourceMemory]
+ retData[memoryLimitKey] = (&mem).String()
+ }
+ }
+ return retData
+}
+
+type crListHandler struct {
+ kind string
+ handlerFunc func(ctx context.Context, client splcommon.ControllerClient) (interface{}, []splcommon.MetaObject, error)
+ checkTelApp bool
+}
+
+func collectDeploymentTelData(ctx context.Context, client splcommon.ControllerClient, deploymentData map[string]interface{}) map[string][]splcommon.MetaObject {
+ reqLogger := log.FromContext(ctx)
+ scopedLog := reqLogger.WithName("collectDeploymentTelData")
+
+ var crWithTelAppList map[string][]splcommon.MetaObject
+ crWithTelAppList = make(map[string][]splcommon.MetaObject)
+
+ scopedLog.Info("Start collecting deployment telemetry data")
+ // Define all CR handlers in a slice
+ handlers := []crListHandler{
+ {kind: "Standalone", handlerFunc: handleStandalones, checkTelApp: true},
+ {kind: "LicenseManager", handlerFunc: handleLicenseManagers, checkTelApp: true},
+ {kind: "LicenseMaster", handlerFunc: handleLicenseMasters, checkTelApp: true},
+ {kind: "SearchHeadCluster", handlerFunc: handleSearchHeadClusters, checkTelApp: true},
+ {kind: "IndexerCluster", handlerFunc: handleIndexerClusters, checkTelApp: false},
+ {kind: "ClusterManager", handlerFunc: handleClusterManagers, checkTelApp: true},
+ {kind: "ClusterMaster", handlerFunc: handleClusterMasters, checkTelApp: true},
+ {kind: "MonitoringConsole", handlerFunc: handleMonitoringConsoles, checkTelApp: false},
+ }
+
+ // Process each CR type using the same logic
+ for _, handler := range handlers {
+ data, crs, err := handler.handlerFunc(ctx, client)
+ if err != nil {
+ scopedLog.Error(err, "Error processing CR type", "kind", handler.kind)
+ continue
+ }
+ if handler.checkTelApp && crs != nil && len(crs) > 0 {
+ crWithTelAppList[handler.kind] = crs
+ }
+ if data != nil {
+ deploymentData[handler.kind] = data
+ }
+ }
+
+ scopedLog.Info("Successfully collected deployment telemetry data", "deploymentData", deploymentData)
+ return crWithTelAppList
+}
+
+func handleStandalones(ctx context.Context, client splcommon.ControllerClient) (interface{}, []splcommon.MetaObject, error) {
+ var list enterpriseApi.StandaloneList
+ err := client.List(ctx, &list)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if len(list.Items) == 0 {
+ return nil, nil, nil
+ }
+
+ retData := make(map[string]interface{})
+ retCRs := make([]splcommon.MetaObject, 0)
+ for i := range list.Items {
+ cr := &list.Items[i]
+ if cr.Status.TelAppInstalled {
+ retCRs = append(retCRs, cr)
+ }
+ retData[cr.GetName()] = collectResourceTelData(cr.Spec.CommonSplunkSpec.Resources)
+ }
+ return retData, retCRs, nil
+}
+
+func handleLicenseManagers(ctx context.Context, client splcommon.ControllerClient) (interface{}, []splcommon.MetaObject, error) {
+ var list enterpriseApi.LicenseManagerList
+ err := client.List(ctx, &list)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if len(list.Items) == 0 {
+ return nil, nil, nil
+ }
+
+ retData := make(map[string]interface{})
+ retCRs := make([]splcommon.MetaObject, 0)
+ for i := range list.Items {
+ cr := &list.Items[i]
+ if cr.Status.TelAppInstalled {
+ retCRs = append(retCRs, cr)
+ }
+ retData[cr.GetName()] = collectResourceTelData(cr.Spec.CommonSplunkSpec.Resources)
+ }
+ return retData, retCRs, nil
+}
+
+func handleLicenseMasters(ctx context.Context, client splcommon.ControllerClient) (interface{}, []splcommon.MetaObject, error) {
+ var list enterpriseApiV3.LicenseMasterList
+ err := client.List(ctx, &list)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if len(list.Items) == 0 {
+ return nil, nil, nil
+ }
+
+ retData := make(map[string]interface{})
+ retCRs := make([]splcommon.MetaObject, 0)
+ for i := range list.Items {
+ cr := &list.Items[i]
+ if cr.Status.TelAppInstalled {
+ retCRs = append(retCRs, cr)
+ }
+ retData[cr.GetName()] = collectResourceTelData(cr.Spec.CommonSplunkSpec.Resources)
+ }
+ return retData, retCRs, nil
+}
+
+func handleSearchHeadClusters(ctx context.Context, client splcommon.ControllerClient) (interface{}, []splcommon.MetaObject, error) {
+ var list enterpriseApi.SearchHeadClusterList
+ err := client.List(ctx, &list)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if len(list.Items) == 0 {
+ return nil, nil, nil
+ }
+
+ retData := make(map[string]interface{})
+ retCRs := make([]splcommon.MetaObject, 0)
+ for i := range list.Items {
+ cr := &list.Items[i]
+ if cr.Status.TelAppInstalled {
+ retCRs = append(retCRs, cr)
+ }
+ retData[cr.GetName()] = collectResourceTelData(cr.Spec.CommonSplunkSpec.Resources)
+ }
+ return retData, retCRs, nil
+}
+
+func handleIndexerClusters(ctx context.Context, client splcommon.ControllerClient) (interface{}, []splcommon.MetaObject, error) {
+ var list enterpriseApi.IndexerClusterList
+ err := client.List(ctx, &list)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if len(list.Items) == 0 {
+ return nil, nil, nil
+ }
+
+ retData := make(map[string]interface{})
+ for i := range list.Items {
+ cr := &list.Items[i]
+ retData[cr.GetName()] = collectResourceTelData(cr.Spec.CommonSplunkSpec.Resources)
+ }
+ return retData, nil, nil
+}
+
+func handleClusterManagers(ctx context.Context, client splcommon.ControllerClient) (interface{}, []splcommon.MetaObject, error) {
+ var list enterpriseApi.ClusterManagerList
+ err := client.List(ctx, &list)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if len(list.Items) == 0 {
+ return nil, nil, nil
+ }
+
+ retData := make(map[string]interface{})
+ retCRs := make([]splcommon.MetaObject, 0)
+ for i := range list.Items {
+ cr := &list.Items[i]
+ if cr.Status.TelAppInstalled {
+ retCRs = append(retCRs, cr)
+ }
+ retData[cr.GetName()] = collectResourceTelData(cr.Spec.CommonSplunkSpec.Resources)
+ }
+ return retData, retCRs, nil
+}
+
+func handleClusterMasters(ctx context.Context, client splcommon.ControllerClient) (interface{}, []splcommon.MetaObject, error) {
+ var list enterpriseApiV3.ClusterMasterList
+ err := client.List(ctx, &list)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if len(list.Items) == 0 {
+ return nil, nil, nil
+ }
+
+ retData := make(map[string]interface{})
+ retCRs := make([]splcommon.MetaObject, 0)
+ for i := range list.Items {
+ cr := &list.Items[i]
+ if cr.Status.TelAppInstalled {
+ retCRs = append(retCRs, cr)
+ }
+ retData[cr.GetName()] = collectResourceTelData(cr.Spec.CommonSplunkSpec.Resources)
+ }
+ return retData, retCRs, nil
+}
+
+func handleMonitoringConsoles(ctx context.Context, client splcommon.ControllerClient) (interface{}, []splcommon.MetaObject, error) {
+ var list enterpriseApi.MonitoringConsoleList
+ err := client.List(ctx, &list)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if len(list.Items) == 0 {
+ return nil, nil, nil
+ }
+
+ retData := make(map[string]interface{})
+ for i := range list.Items {
+ cr := &list.Items[i]
+ retData[cr.GetName()] = collectResourceTelData(cr.Spec.CommonSplunkSpec.Resources)
+ }
+ return retData, nil, nil
+}
+
+func CollectCMTelData(ctx context.Context, cm *corev1.ConfigMap, data map[string]interface{}) {
+ reqLogger := log.FromContext(ctx)
+ scopedLog := reqLogger.WithName("collectCMTelData")
+ scopedLog.Info("Start")
+
+ for key, val := range cm.Data {
+ if key == telStatusKey {
+ continue
+ }
+ var compData interface{}
+ scopedLog.Info("Processing telemetry input from other components", "key", key)
+ err := json.Unmarshal([]byte(val), &compData)
+ if err != nil {
+ scopedLog.Info("Not able to unmarshal. Will include the input as string", "key", key, "value", val)
+ data[key] = val
+ } else {
+ data[key] = compData
+ scopedLog.Info("Got telemetry input", "key", key, "value", val)
+ }
+ }
+}
+
+func getCurrentStatus(ctx context.Context, cm *corev1.ConfigMap) *TelemetryStatus {
+ reqLogger := log.FromContext(ctx)
+ scopedLog := reqLogger.WithName("getCurrentStatus")
+
+ defaultStatus := &TelemetryStatus{
+ LastTransmission: "",
+ Test: defaultTestMode,
+ SokVersion: defaultTestVersion,
+ }
+ if val, ok := cm.Data[telStatusKey]; ok {
+ var status TelemetryStatus
+ err := json.Unmarshal([]byte(val), &status)
+ if err != nil {
+ scopedLog.Error(err, "Failed to unmarshal telemetry status", "value", val)
+ return defaultStatus
+ } else {
+ scopedLog.Info("Got current telemetry status from configmap", "status", status)
+ return &status
+ }
+ }
+
+ scopedLog.Info("No status set in configmap")
+ return defaultStatus
+}
+
+func SendTelemetry(ctx context.Context, client splcommon.ControllerClient, cr splcommon.MetaObject, data map[string]interface{}, test bool) bool {
+ reqLogger := log.FromContext(ctx)
+ scopedLog := reqLogger.WithName("sendTelemetry").WithValues(
+ "name", cr.GetObjectMeta().GetName(),
+ "namespace", cr.GetObjectMeta().GetNamespace(),
+ "kind", cr.GetObjectKind().GroupVersionKind().Kind)
+ scopedLog.Info("Start")
+
+ var instanceID InstanceType
+ switch cr.GetObjectKind().GroupVersionKind().Kind {
+ case "Standalone":
+ instanceID = SplunkStandalone
+ case "LicenseManager":
+ instanceID = SplunkLicenseManager
+ case "LicenseMaster":
+ instanceID = SplunkLicenseMaster
+ case "SearchHeadCluster":
+ instanceID = SplunkSearchHead
+ case "ClusterMaster":
+ instanceID = SplunkClusterMaster
+ case "ClusterManager":
+ instanceID = SplunkClusterManager
+ default:
+ scopedLog.Error(fmt.Errorf("unknown CR kind"), "Failed to determine instance type for telemetry")
+ return false
+ }
+
+ serviceName := GetSplunkServiceName(instanceID, cr.GetName(), false)
+ serviceFQDN := splcommon.GetServiceFQDN(cr.GetNamespace(), serviceName)
+ scopedLog.Info("Got service FQDN", "serviceFQDN", serviceFQDN)
+
+ defaultSecretObjName := splcommon.GetNamespaceScopedSecretName(cr.GetNamespace())
+ defaultSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), defaultSecretObjName)
+ if err != nil {
+ scopedLog.Error(err, "Could not access default secret object")
+ return false
+ }
+
+ // Get the admin password from the secret object
+ adminPwd, foundSecret := defaultSecret.Data["password"]
+ if !foundSecret {
+ scopedLog.Info("Failed to find admin password")
+ return false
+ }
+ splunkClient := splclient.NewSplunkClient(fmt.Sprintf("https://%s:8089", serviceFQDN), "admin", string(adminPwd))
+
+ var licenseInfo map[string]splclient.LicenseInfo
+ licenseInfo, err = splunkClient.GetLicenseInfo()
+ if err != nil {
+ scopedLog.Error(err, "Failed to retrieve the license info")
+ return false
+ } else {
+ data[telLicenseInfoKey] = licenseInfo
+ }
+ telemetry := Telemetry{
+ Type: "event",
+ Component: "sok",
+ OptInRequired: 2,
+ Data: data,
+ Test: test,
+ }
+
+ path := fmt.Sprintf("/servicesNS/nobody/%s/telemetry-metric", telAppNameStr)
+ bodyBytes, err := json.Marshal(telemetry)
+ if err != nil {
+ scopedLog.Error(err, "Failed to marshal to bytes")
+ return false
+ }
+ scopedLog.Info("Sending request", "path", path)
+
+ response, err := splunkClient.SendTelemetry(path, bodyBytes)
+ if err != nil {
+ scopedLog.Error(err, "Failed to send telemetry")
+ return false
+ }
+
+ scopedLog.Info("Successfully sent telemetry", "response", response)
+ return true
+}
diff --git a/pkg/splunk/enterprise/telemetry_test.go b/pkg/splunk/enterprise/telemetry_test.go
new file mode 100644
index 000000000..8a7a55073
--- /dev/null
+++ b/pkg/splunk/enterprise/telemetry_test.go
@@ -0,0 +1,1285 @@
+// Copyright (c) 2018-2022 Splunk Inc. All rights reserved.
+
+package enterprise
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
+ splclient "github.com/splunk/splunk-operator/pkg/splunk/client"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "testing"
+ "time"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ "github.com/splunk/splunk-operator/pkg/splunk/test"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// --- MOCKS AND TEST HELPERS ---
+
+// errorUpdateClient is a mock client whose Update call always fails.
+// It exercises the error-handling path of updateLastTransmissionTime.
+
+type errorUpdateClient struct {
+ test.MockClient
+}
+
+// Update always returns a fixed error, regardless of the object passed.
+func (c *errorUpdateClient) Update(_ context.Context, _ client.Object, _ ...client.UpdateOption) error {
+ return errors.New("forced update error")
+}
+
+// FakeListClient is a local mock client that implements List for the CR list
+// types needed by these tests. CRs are registered per kind in the crs map.
+// (It does not handle StatefulSet lists; only the CR kinds switched on below.)
+
+type FakeListClient struct {
+ test.MockClient
+ crs map[string][]client.Object
+}
+
+// List populates the given list's Items from the registered CRs of the
+// matching kind. Unrecognized list types are silently accepted and left empty.
+func (c *FakeListClient) List(_ context.Context, list client.ObjectList, _ ...client.ListOption) error {
+ switch l := list.(type) {
+ case *enterpriseApi.StandaloneList:
+  l.Items = nil
+  for _, obj := range c.crs["Standalone"] {
+   l.Items = append(l.Items, *(obj.(*enterpriseApi.Standalone)))
+  }
+ case *enterpriseApi.LicenseManagerList:
+  l.Items = nil
+  for _, obj := range c.crs["LicenseManager"] {
+   l.Items = append(l.Items, *(obj.(*enterpriseApi.LicenseManager)))
+  }
+ case *enterpriseApiV3.LicenseMasterList:
+  l.Items = nil
+  for _, obj := range c.crs["LicenseMaster"] {
+   l.Items = append(l.Items, *(obj.(*enterpriseApiV3.LicenseMaster)))
+  }
+ case *enterpriseApi.SearchHeadClusterList:
+  l.Items = nil
+  for _, obj := range c.crs["SearchHeadCluster"] {
+   l.Items = append(l.Items, *(obj.(*enterpriseApi.SearchHeadCluster)))
+  }
+ case *enterpriseApi.IndexerClusterList:
+  l.Items = nil
+  for _, obj := range c.crs["IndexerCluster"] {
+   l.Items = append(l.Items, *(obj.(*enterpriseApi.IndexerCluster)))
+  }
+ case *enterpriseApi.ClusterManagerList:
+  l.Items = nil
+  for _, obj := range c.crs["ClusterManager"] {
+   l.Items = append(l.Items, *(obj.(*enterpriseApi.ClusterManager)))
+  }
+ case *enterpriseApiV3.ClusterMasterList:
+  l.Items = nil
+  for _, obj := range c.crs["ClusterMaster"] {
+   l.Items = append(l.Items, *(obj.(*enterpriseApiV3.ClusterMaster)))
+  }
+ case *enterpriseApi.MonitoringConsoleList:
+  l.Items = nil
+  for _, obj := range c.crs["MonitoringConsole"] {
+   l.Items = append(l.Items, *(obj.(*enterpriseApi.MonitoringConsole)))
+  }
+ default:
+  // NOTE(review): unknown list types return success without populating
+  // Items — confirm no test relies on an error for unsupported types.
+  return nil
+ }
+ return nil
+}
+
+// Verifies collectResourceTelData returns non-empty default values when both
+// the Requests and Limits maps are nil.
+func TestTelemetryCollectResourceTelData_NilMaps(t *testing.T) {
+ data := collectResourceTelData(corev1.ResourceRequirements{})
+ if data[cpuRequestKey] == "" || data[memoryRequestKey] == "" || data[cpuLimitKey] == "" || data[memoryLimitKey] == "" {
+  t.Errorf("expected default values for nil maps")
+ }
+}
+
+// Verifies collectResourceTelData returns non-empty defaults when the maps
+// exist but contain no CPU/memory entries.
+func TestTelemetryCollectResourceTelData_MissingKeys(t *testing.T) {
+ reqs := corev1.ResourceRequirements{
+  Requests: corev1.ResourceList{},
+  Limits:   corev1.ResourceList{},
+ }
+ data := collectResourceTelData(reqs)
+ if data[cpuRequestKey] == "" || data[memoryRequestKey] == "" || data[cpuLimitKey] == "" || data[memoryLimitKey] == "" {
+  t.Errorf("expected default values for missing keys")
+ }
+}
+
+// Verifies collectResourceTelData echoes explicitly-set CPU/memory
+// requests and limits verbatim.
+func TestTelemetryCollectResourceTelData_ValuesPresent(t *testing.T) {
+ reqs := corev1.ResourceRequirements{
+  Requests: corev1.ResourceList{
+   corev1.ResourceCPU:    resource.MustParse("123m"),
+   corev1.ResourceMemory: resource.MustParse("456Mi"),
+  },
+  Limits: corev1.ResourceList{
+   corev1.ResourceCPU:    resource.MustParse("789m"),
+   corev1.ResourceMemory: resource.MustParse("1Gi"),
+  },
+ }
+ data := collectResourceTelData(reqs)
+ if data[cpuRequestKey] != "123m" || data[memoryRequestKey] != "456Mi" || data[cpuLimitKey] != "789m" || data[memoryLimitKey] != "1Gi" {
+  t.Errorf("unexpected values: got %+v", data)
+ }
+}
+
+// Verifies CollectCMTelData falls back to the raw string when a configmap
+// value is not valid JSON.
+func TestTelemetryCollectCMTelData_UnmarshalError(t *testing.T) {
+ cm := &corev1.ConfigMap{Data: map[string]string{"bad": "notjson"}}
+ data := make(map[string]interface{})
+ CollectCMTelData(context.TODO(), cm, data)
+ if data["bad"] != "notjson" {
+  t.Errorf("expected fallback to string on unmarshal error")
+ }
+}
+
+// Verifies CollectCMTelData unmarshals valid JSON configmap values into
+// structured maps.
+func TestTelemetryCollectCMTelData_ValidJSON(t *testing.T) {
+ val := map[string]interface{}{"foo": "bar"}
+ b, _ := json.Marshal(val)
+ cm := &corev1.ConfigMap{Data: map[string]string{"good": string(b)}}
+ data := make(map[string]interface{})
+ CollectCMTelData(context.TODO(), cm, data)
+ if m, ok := data["good"].(map[string]interface{}); !ok || m["foo"] != "bar" {
+  t.Errorf("expected valid JSON to be unmarshaled")
+ }
+}
+
+// Verifies getCurrentStatus returns the default status (Test == defaultTestMode)
+// when the configmap has no data.
+func TestTelemetryGetCurrentStatus_Default(t *testing.T) {
+ cm := &corev1.ConfigMap{Data: nil}
+ status := getCurrentStatus(context.TODO(), cm)
+ if status == nil || status.Test != defaultTestMode {
+  t.Errorf("expected default status")
+ }
+}
+
+// Verifies getCurrentStatus falls back to the default status when the stored
+// status value cannot be unmarshaled.
+func TestTelemetryGetCurrentStatus_UnmarshalError(t *testing.T) {
+ cm := &corev1.ConfigMap{Data: map[string]string{"status": "notjson"}}
+ status := getCurrentStatus(context.TODO(), cm)
+ if status == nil || status.Test != defaultTestMode {
+  t.Errorf("expected default status on unmarshal error")
+ }
+}
+
+// NOTE(review): the name promises a marshal error, but TelemetryStatus always
+// marshals cleanly and this test asserts nothing — it only exercises the happy
+// path with a working mock client. Consider renaming or forcing the error.
+func TestTelemetryUpdateLastTransmissionTime_MarshalError(t *testing.T) {
+ ctx := context.TODO()
+ cm := &corev1.ConfigMap{Data: map[string]string{}}
+ status := &TelemetryStatus{Test: "false"}
+ updateLastTransmissionTime(ctx, test.NewMockClient(), cm, status) // smoke test only; no assertions
+}
+
+// Verifies SendTelemetry bails out (returns false) for a CR kind it does not
+// recognize.
+func TestSendTelemetry_UnknownKind(t *testing.T) {
+ cr := &enterpriseApi.Standalone{}
+ cr.TypeMeta.Kind = "UnknownKind"
+ ok := SendTelemetry(context.TODO(), test.NewMockClient(), cr, map[string]interface{}{}, false)
+ if ok {
+  t.Errorf("expected SendTelemetry to return false for unknown kind")
+ }
+}
+
+// Verifies SendTelemetry returns false when the namespace-scoped secret
+// cannot be found.
+func TestSendTelemetry_NoSecret(t *testing.T) {
+ cr := &enterpriseApi.Standalone{}
+ cr.TypeMeta.Kind = "Standalone"
+ cr.ObjectMeta.Name = "test"
+ cr.ObjectMeta.Namespace = "default"
+ ok := SendTelemetry(context.TODO(), test.NewMockClient(), cr, map[string]interface{}{}, false)
+ if ok {
+  t.Errorf("expected SendTelemetry to return false if no secret found")
+ }
+}
+
+// Verifies updateLastTransmissionTime writes an RFC3339 timestamp and the
+// Test flag into the configmap under telStatusKey.
+func TestTelemetryUpdateLastTransmissionTime_SetsTimestamp(t *testing.T) {
+ mockClient := test.NewMockClient()
+ ctx := context.TODO()
+ cm := &corev1.ConfigMap{
+  ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "default"},
+  Data:       map[string]string{},
+ }
+ status := &TelemetryStatus{Test: "false"}
+
+ updateLastTransmissionTime(ctx, mockClient, cm, status)
+ statusStr, ok := cm.Data[telStatusKey]
+ if !ok {
+  t.Fatalf("expected telStatusKey in configmap data")
+ }
+ var statusObj TelemetryStatus
+ if err := json.Unmarshal([]byte(statusStr), &statusObj); err != nil {
+  t.Fatalf("failed to unmarshal status: %v", err)
+ }
+ if statusObj.LastTransmission == "" {
+  t.Errorf("expected LastTransmission to be set")
+ }
+ if _, err := time.Parse(time.RFC3339, statusObj.LastTransmission); err != nil {
+  t.Errorf("LastTransmission is not RFC3339: %v", statusObj.LastTransmission)
+ }
+ if statusObj.Test != "false" {
+  t.Errorf("expected Test to be 'false', got %v", statusObj.Test)
+ }
+}
+
+// Exercises the client-Update failure path; the function is expected to
+// tolerate the error (no assertions — smoke test only).
+func TestTelemetryUpdateLastTransmissionTime_UpdateError(t *testing.T) {
+ ctx := context.TODO()
+ cm := &corev1.ConfigMap{
+  ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "default"},
+  Data:       map[string]string{},
+ }
+ badClient := &errorUpdateClient{}
+ status := &TelemetryStatus{Test: "false"}
+ updateLastTransmissionTime(ctx, badClient, cm, status)
+}
+
+// Verifies a second call refreshes the stored status.
+// NOTE(review): the 1s sleep is needed only because RFC3339 here appears to
+// have second granularity; consider comparing parsed timestamps to drop it.
+func TestTelemetryUpdateLastTransmissionTime_RepeatedCalls(t *testing.T) {
+ mockClient := test.NewMockClient()
+ ctx := context.TODO()
+ cm := &corev1.ConfigMap{
+  ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "default"},
+  Data:       map[string]string{},
+ }
+ status := &TelemetryStatus{Test: "false"}
+ updateLastTransmissionTime(ctx, mockClient, cm, status)
+ firstStatus := cm.Data[telStatusKey]
+ time.Sleep(1 * time.Second)
+ updateLastTransmissionTime(ctx, mockClient, cm, status)
+ secondStatus := cm.Data[telStatusKey]
+ if firstStatus == secondStatus {
+  t.Errorf("expected status to change on repeated call")
+ }
+}
+
+// Exercises collectDeploymentTelData across all seven CR kinds and verifies
+// per-CR resource telemetry (cpu/memory requests) is gathered for each.
+func TestTelemetryCollectDeploymentTelData_AllKinds(t *testing.T) {
+ ctx := context.TODO()
+ crs := map[string][]client.Object{
+  "Standalone": {&enterpriseApi.Standalone{TypeMeta: metav1.TypeMeta{Kind: "Standalone"}, ObjectMeta: metav1.ObjectMeta{Name: "standalone1"}, Spec: enterpriseApi.StandaloneSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("1Gi")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("2Gi")}}}}}}},
+  "LicenseManager": {&enterpriseApi.LicenseManager{TypeMeta: metav1.TypeMeta{Kind: "LicenseManager"}, ObjectMeta: metav1.ObjectMeta{Name: "lm1"}, Spec: enterpriseApi.LicenseManagerSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("3"), corev1.ResourceMemory: resource.MustParse("3Gi")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("4"), corev1.ResourceMemory: resource.MustParse("4Gi")}}}}}}},
+  "LicenseMaster": {&enterpriseApiV3.LicenseMaster{TypeMeta: metav1.TypeMeta{Kind: "LicenseMaster"}, ObjectMeta: metav1.ObjectMeta{Name: "lmast1"}, Spec: enterpriseApiV3.LicenseMasterSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("5"), corev1.ResourceMemory: resource.MustParse("5Gi")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("6"), corev1.ResourceMemory: resource.MustParse("6Gi")}}}}}}},
+  "SearchHeadCluster": {&enterpriseApi.SearchHeadCluster{TypeMeta: metav1.TypeMeta{Kind: "SearchHeadCluster"}, ObjectMeta: metav1.ObjectMeta{Name: "shc1"}, Spec: enterpriseApi.SearchHeadClusterSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("7"), corev1.ResourceMemory: resource.MustParse("7Gi")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("8"), corev1.ResourceMemory: resource.MustParse("8Gi")}}}}}}},
+  "IndexerCluster": {&enterpriseApi.IndexerCluster{TypeMeta: metav1.TypeMeta{Kind: "IndexerCluster"}, ObjectMeta: metav1.ObjectMeta{Name: "idx1"}, Spec: enterpriseApi.IndexerClusterSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("9"), corev1.ResourceMemory: resource.MustParse("9Gi")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("10"), corev1.ResourceMemory: resource.MustParse("10Gi")}}}}}}},
+  "ClusterManager": {&enterpriseApi.ClusterManager{TypeMeta: metav1.TypeMeta{Kind: "ClusterManager"}, ObjectMeta: metav1.ObjectMeta{Name: "cmgr1"}, Spec: enterpriseApi.ClusterManagerSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("11"), corev1.ResourceMemory: resource.MustParse("11Gi")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("12"), corev1.ResourceMemory: resource.MustParse("12Gi")}}}}}}},
+  "ClusterMaster": {&enterpriseApiV3.ClusterMaster{TypeMeta: metav1.TypeMeta{Kind: "ClusterMaster"}, ObjectMeta: metav1.ObjectMeta{Name: "cmast1"}, Spec: enterpriseApiV3.ClusterMasterSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("13"), corev1.ResourceMemory: resource.MustParse("13Gi")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("14"), corev1.ResourceMemory: resource.MustParse("14Gi")}}}}}}},
+ }
+ fakeClient := &FakeListClient{crs: crs}
+ deploymentData := make(map[string]interface{})
+ crWithTelAppList := collectDeploymentTelData(ctx, fakeClient, deploymentData)
+ kinds := []string{"Standalone", "LicenseManager", "LicenseMaster", "SearchHeadCluster", "IndexerCluster", "ClusterManager", "ClusterMaster"}
+ for _, kind := range kinds {
+  if _, ok := deploymentData[kind]; !ok {
+   t.Errorf("expected deploymentData to have key %s", kind)
+  }
+  // Check resource data for at least one CR per kind
+  kindData, ok := deploymentData[kind].(map[string]interface{})
+  if !ok {
+   t.Errorf("expected deploymentData[%s] to be map[string]interface{}", kind)
+   continue
+  }
+  for crName, v := range kindData {
+   resData, ok := v.(map[string]string)
+   if !ok {
+    t.Errorf("expected resource data for %s/%s to be map[string]string", kind, crName)
+   }
+   // Spot check a value
+   if resData[cpuRequestKey] == "" || resData[memoryRequestKey] == "" {
+    t.Errorf("expected resource data for %s/%s to have cpu/memory", kind, crName)
+   }
+  }
+ }
+ // crWithTelAppList should be empty since TelAppInstalled is not set
+ if len(crWithTelAppList) != 0 {
+  t.Errorf("expected crWithTelAppList to be empty if TelAppInstalled is not set")
+ }
+}
+
+// Verifies ApplyTelemetry errors when no CRs exist.
+// NOTE(review): the check `result != (reconcile.Result{}) && !result.Requeue`
+// is vacuous when the zero Result is returned (Requeue is then false but the
+// first operand is also false) — confirm the intended assertion; the same
+// pattern appears in the other ApplyTelemetry tests below.
+func TestApplyTelemetry_NoCRs(t *testing.T) {
+ cm := &corev1.ConfigMap{Data: map[string]string{}}
+ mockClient := test.NewMockClient()
+ result, err := ApplyTelemetry(context.TODO(), mockClient, cm)
+ if err == nil {
+  t.Errorf("expected error when no CRs are present")
+ }
+ if result != (reconcile.Result{}) && !result.Requeue {
+  t.Errorf("expected requeue to be true")
+ }
+}
+
+// Verifies SendTelemetry returns false when license info cannot be fetched
+// (no live Splunk endpoint behind the mock client).
+func TestSendTelemetry_LicenseInfoError(t *testing.T) {
+ cr := &enterpriseApi.Standalone{}
+ cr.TypeMeta.Kind = "Standalone"
+ cr.ObjectMeta.Name = "test"
+ cr.ObjectMeta.Namespace = "default"
+ mockClient := test.NewMockClient()
+ // Simulate secret found, but license info error
+ ok := SendTelemetry(context.TODO(), mockClient, cr, map[string]interface{}{}, false)
+ if ok {
+  t.Errorf("expected SendTelemetry to return false on license info error")
+ }
+}
+
+// Verifies SendTelemetry returns false when the secret exists but holds no
+// "password" entry.
+func TestSendTelemetry_AdminPasswordMissing(t *testing.T) {
+ cr := &enterpriseApi.Standalone{}
+ cr.TypeMeta.Kind = "Standalone"
+ cr.ObjectMeta.Name = "test"
+ cr.ObjectMeta.Namespace = "default"
+ mockClient := test.NewMockClient()
+ // Simulate secret missing password
+ secret := &corev1.Secret{
+  ObjectMeta: metav1.ObjectMeta{
+   Name:      "splunk-test-secret",
+   Namespace: cr.ObjectMeta.Namespace,
+  },
+  Data: map[string][]byte{},
+ }
+ _ = mockClient.Create(context.TODO(), secret)
+ ok := SendTelemetry(context.TODO(), mockClient, cr, map[string]interface{}{}, false)
+ if ok {
+  t.Errorf("expected SendTelemetry to return false if admin password is missing")
+ }
+}
+
+// Path-coverage test: the mock client cannot reach a real Splunk endpoint, so
+// SendTelemetry is expected to return false.
+// NOTE(review): this test asserts nothing (only t.Logf on an unexpected true);
+// consider a hard assertion or an injected HTTP stub.
+func TestSendTelemetry_Success(t *testing.T) {
+ cr := &enterpriseApi.Standalone{}
+ cr.TypeMeta.Kind = "Standalone"
+ cr.ObjectMeta.Name = "test"
+ cr.ObjectMeta.Namespace = "default"
+ mockClient := test.NewMockClient()
+ // Add a secret with a password
+ secret := &corev1.Secret{
+  ObjectMeta: metav1.ObjectMeta{
+   Name:      "splunk-test-secret",
+   Namespace: cr.ObjectMeta.Namespace,
+  },
+  Data: map[string][]byte{"password": []byte("adminpass")},
+ }
+ _ = mockClient.Create(context.TODO(), secret)
+ // Mock license info retrieval by patching the SplunkClient if needed
+ ok := SendTelemetry(context.TODO(), mockClient, cr, map[string]interface{}{}, false)
+ // We expect false because the mock client does not actually send telemetry, but this covers the path
+ if ok {
+  t.Logf("SendTelemetry returned true, but expected false due to mock client")
+ }
+}
+
+// Exercises ApplyTelemetry with a CR marked TelAppInstalled.
+// NOTE(review): the compound condition can never fire when err == nil and the
+// zero Result is returned, so this test may assert nothing — confirm intent.
+func TestApplyTelemetry_Success(t *testing.T) {
+ cm := &corev1.ConfigMap{Data: map[string]string{}}
+ mockClient := test.NewMockClient()
+ // Add a CR with TelAppInstalled true to trigger sending
+ cr := &enterpriseApi.Standalone{
+  TypeMeta:   metav1.TypeMeta{Kind: "Standalone"},
+  ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
+  Status:     enterpriseApi.StandaloneStatus{TelAppInstalled: true},
+ }
+ _ = mockClient.Create(context.TODO(), cr)
+ result, err := ApplyTelemetry(context.TODO(), mockClient, cm)
+ if err == nil && result != (reconcile.Result{}) && !result.Requeue {
+  t.Errorf("expected requeue to be true or error to be non-nil")
+ }
+}
+
+// Verifies ApplyTelemetry still errors with no CRs even when the configmap
+// already carries unrelated data.
+func TestApplyTelemetry_ConfigMapWithExistingData(t *testing.T) {
+ cm := &corev1.ConfigMap{Data: map[string]string{"foo": "bar"}}
+ mockClient := test.NewMockClient()
+ result, err := ApplyTelemetry(context.TODO(), mockClient, cm)
+ if err == nil {
+  t.Errorf("expected error when no CRs are present, even with configmap data")
+ }
+ if result != (reconcile.Result{}) && !result.Requeue {
+  t.Errorf("expected requeue to be true")
+ }
+}
+
+// Verifies ApplyTelemetry errors when CRs exist but none has
+// TelAppInstalled=true.
+func TestApplyTelemetry_CRNoTelAppInstalled(t *testing.T) {
+ cm := &corev1.ConfigMap{Data: map[string]string{}}
+ mockClient := test.NewMockClient()
+ cr := &enterpriseApi.Standalone{
+  TypeMeta:   metav1.TypeMeta{Kind: "Standalone"},
+  ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
+  Status:     enterpriseApi.StandaloneStatus{TelAppInstalled: false},
+ }
+ _ = mockClient.Create(context.TODO(), cr)
+ result, err := ApplyTelemetry(context.TODO(), mockClient, cm)
+ if err == nil {
+  t.Errorf("expected error when no CRs with TelAppInstalled=true")
+ }
+ if result != (reconcile.Result{}) && !result.Requeue {
+  t.Errorf("expected requeue to be true")
+ }
+}
+
+// Verifies ApplyTelemetry surfaces an error when sending telemetry fails.
+// NOTE(review): this patches the test-file-local newSplunkClientFactory; see
+// the note on that variable about whether the patch can reach production code.
+func TestApplyTelemetry_SendTelemetryFails(t *testing.T) {
+ cm := &corev1.ConfigMap{Data: map[string]string{}}
+ mockClient := test.NewMockClient()
+ cr := &enterpriseApi.Standalone{
+  TypeMeta:   metav1.TypeMeta{Kind: "Standalone"},
+  ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
+  Status:     enterpriseApi.StandaloneStatus{TelAppInstalled: true},
+ }
+ _ = mockClient.Create(context.TODO(), cr)
+ origFactory := newSplunkClientFactory
+ newSplunkClientFactory = func(uri, user, pass string) SplunkTelemetryClient {
+  return &mockSplunkTelemetryClient{
+   GetLicenseInfoFunc: func() (map[string]splclient.LicenseInfo, error) {
+    return map[string]splclient.LicenseInfo{"test": {}}, nil
+   },
+   SendTelemetryFunc: func(path string, body []byte) (interface{}, error) {
+    return nil, errors.New("fail send")
+   },
+  }
+ }
+ defer func() { newSplunkClientFactory = origFactory }()
+ result, err := ApplyTelemetry(context.TODO(), mockClient, cm)
+ if err == nil {
+  t.Errorf("expected error when SendTelemetry fails")
+ }
+ if result != (reconcile.Result{}) && !result.Requeue {
+  t.Errorf("expected requeue to be true")
+ }
+}
+
+// Verifies getCurrentStatus round-trips a well-formed stored status.
+func TestGetCurrentStatus_ValidStatus(t *testing.T) {
+ status := TelemetryStatus{LastTransmission: "2024-01-01T00:00:00Z", Test: "true", SokVersion: "1.2.3"}
+ b, _ := json.Marshal(status)
+ cm := &corev1.ConfigMap{Data: map[string]string{"status": string(b)}}
+ got := getCurrentStatus(context.TODO(), cm)
+ if got.LastTransmission != status.LastTransmission || got.Test != status.Test || got.SokVersion != status.SokVersion {
+  t.Errorf("expected status to match, got %+v", got)
+ }
+}
+
+// Verifies handleMonitoringConsoles yields nil data and nil error when no
+// MonitoringConsole CRs exist.
+func TestHandleMonitoringConsoles_NoCRs(t *testing.T) {
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"MonitoringConsole": {}}}
+ ctx := context.TODO()
+ data, _, err := handleMonitoringConsoles(ctx, mockClient)
+ if data != nil || err != nil {
+  t.Errorf("expected nil, nil, nil when no MonitoringConsole CRs exist")
+ }
+}
+
+// Verifies handleMonitoringConsoles emits one resource-telemetry entry for a
+// single MonitoringConsole CR.
+func TestHandleMonitoringConsoles_OneCR(t *testing.T) {
+ mc := &enterpriseApi.MonitoringConsole{
+  ObjectMeta: metav1.ObjectMeta{Name: "mc1"},
+  Spec: enterpriseApi.MonitoringConsoleSpec{
+   CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+    Spec: enterpriseApi.Spec{
+     Resources: corev1.ResourceRequirements{
+      Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("2Gi")},
+      Limits:   corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("4Gi")},
+     },
+    },
+   },
+  },
+ }
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"MonitoringConsole": {mc}}}
+ ctx := context.TODO()
+ data, _, err := handleMonitoringConsoles(ctx, mockClient)
+ if err != nil {
+  t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 1 {
+  t.Errorf("expected one telemetry entry for mc1")
+ }
+ res, ok := m["mc1"].(map[string]string)
+ if !ok {
+  t.Errorf("expected resource telemetry for mc1")
+ }
+ if res[cpuRequestKey] != "1" || res[memoryRequestKey] != "2Gi" || res[cpuLimitKey] != "2" || res[memoryLimitKey] != "4Gi" {
+  t.Errorf("unexpected resource telemetry: %+v", res)
+ }
+}
+
+// Verifies a List failure is propagated as-is by handleMonitoringConsoles.
+func TestHandleMonitoringConsoles_ListError(t *testing.T) {
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"MonitoringConsole": {}}}
+ ctx := context.TODO()
+ errClient := &errorClient{mockClient}
+ data, _, err := handleMonitoringConsoles(ctx, errClient)
+ if err == nil || err.Error() != "fail list" {
+  t.Errorf("expected error 'fail list', got %v", err)
+ }
+ if data != nil {
+  t.Errorf("expected nil, nil when error")
+ }
+}
+
+// Verifies telemetry entries are produced for every MonitoringConsole CR.
+func TestHandleMonitoringConsoles_MultipleCRs(t *testing.T) {
+ mc1 := &enterpriseApi.MonitoringConsole{
+  ObjectMeta: metav1.ObjectMeta{Name: "mc1"},
+  Spec: enterpriseApi.MonitoringConsoleSpec{
+   CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+    Spec: enterpriseApi.Spec{
+     Resources: corev1.ResourceRequirements{
+      Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("2Gi")},
+      Limits:   corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("4Gi")},
+     },
+    },
+   },
+  },
+ }
+ mc2 := &enterpriseApi.MonitoringConsole{
+  ObjectMeta: metav1.ObjectMeta{Name: "mc2"},
+  Spec: enterpriseApi.MonitoringConsoleSpec{
+   CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+    Spec: enterpriseApi.Spec{
+     Resources: corev1.ResourceRequirements{
+      Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("3"), corev1.ResourceMemory: resource.MustParse("6Gi")},
+      Limits:   corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("4"), corev1.ResourceMemory: resource.MustParse("8Gi")},
+     },
+    },
+   },
+  },
+ }
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"MonitoringConsole": {mc1, mc2}}}
+ ctx := context.TODO()
+ data, _, err := handleMonitoringConsoles(ctx, mockClient)
+ if err != nil {
+  t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 2 {
+  t.Errorf("expected two telemetry entries")
+ }
+ res1, ok := m["mc1"].(map[string]string)
+ if !ok || res1[cpuRequestKey] != "1" || res1[memoryRequestKey] != "2Gi" || res1[cpuLimitKey] != "2" || res1[memoryLimitKey] != "4Gi" {
+  t.Errorf("unexpected resource telemetry for mc1: %+v", res1)
+ }
+ res2, ok := m["mc2"].(map[string]string)
+ if !ok || res2[cpuRequestKey] != "3" || res2[memoryRequestKey] != "6Gi" || res2[cpuLimitKey] != "4" || res2[memoryLimitKey] != "8Gi" {
+  t.Errorf("unexpected resource telemetry for mc2: %+v", res2)
+ }
+}
+
+// errorClient wraps FakeListClient and makes List always fail.
+// NOTE(review): errorStandaloneClient and errorLicenseManagerClient below are
+// byte-identical duplicates of this type; consider consolidating into one.
+
+type errorClient struct{ *FakeListClient }
+
+func (c *errorClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
+ return errors.New("fail list")
+}
+
+// --- TEST-ONLY PATCHABLE TELEMETRY CLIENT MOCKS ---
+
+// SplunkTelemetryClient is the interface used for test patching (mirrors the
+// production client's GetLicenseInfo/SendTelemetry surface).
+type SplunkTelemetryClient interface {
+ GetLicenseInfo() (map[string]splclient.LicenseInfo, error)
+ SendTelemetry(path string, body []byte) (interface{}, error)
+}
+
+// mockSplunkTelemetryClient is a test mock for SplunkTelemetryClient.
+// Each method delegates to its corresponding Func field when set, otherwise
+// returns a fixed success value.
+type mockSplunkTelemetryClient struct {
+ GetLicenseInfoFunc func() (map[string]splclient.LicenseInfo, error)
+ SendTelemetryFunc  func(path string, body []byte) (interface{}, error)
+}
+
+func (m *mockSplunkTelemetryClient) GetLicenseInfo() (map[string]splclient.LicenseInfo, error) {
+ if m.GetLicenseInfoFunc != nil {
+  return m.GetLicenseInfoFunc()
+ }
+ return map[string]splclient.LicenseInfo{"test": {}}, nil
+}
+func (m *mockSplunkTelemetryClient) SendTelemetry(path string, body []byte) (interface{}, error) {
+ if m.SendTelemetryFunc != nil {
+  return m.SendTelemetryFunc(path, body)
+ }
+ return nil, nil
+}
+
+// Patchable factory for tests.
+// NOTE(review): this variable is declared in the test file. If production code
+// declares a package-level newSplunkClientFactory, this is a duplicate
+// declaration (compile error); if it does not, patching this variable cannot
+// influence SendTelemetry/ApplyTelemetry. Confirm which declaration is
+// authoritative before relying on TestApplyTelemetry_SendTelemetryFails.
+var newSplunkClientFactory = func(uri, user, pass string) SplunkTelemetryClient {
+ return &mockSplunkTelemetryClient{}
+}
+
+// --- Tests for handleStandalones ---
+
+// Verifies handleStandalones yields nil data and nil error with no CRs.
+func TestHandleStandalones_NoCRs(t *testing.T) {
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"Standalone": {}}}
+ ctx := context.TODO()
+ data, _, err := handleStandalones(ctx, mockClient)
+ if data != nil || err != nil {
+  t.Errorf("expected nil, nil, nil when no Standalone CRs exist")
+ }
+}
+
+// Verifies resource telemetry is collected for a single Standalone CR.
+func TestHandleStandalones_OneCR(t *testing.T) {
+ cr := &enterpriseApi.Standalone{
+  ObjectMeta: metav1.ObjectMeta{Name: "s1"},
+  Spec: enterpriseApi.StandaloneSpec{
+   CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+    Spec: enterpriseApi.Spec{
+     Resources: corev1.ResourceRequirements{
+      Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("2Gi")},
+      Limits:   corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("4Gi")},
+     },
+    },
+   },
+  },
+ }
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"Standalone": {cr}}}
+ ctx := context.TODO()
+ data, _, err := handleStandalones(ctx, mockClient)
+ if err != nil {
+  t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 1 {
+  t.Errorf("expected one telemetry entry for s1")
+ }
+ res, ok := m["s1"].(map[string]string)
+ if !ok {
+  t.Errorf("expected resource telemetry for s1")
+ }
+ if res[cpuRequestKey] != "1" || res[memoryRequestKey] != "2Gi" || res[cpuLimitKey] != "2" || res[memoryLimitKey] != "4Gi" {
+  t.Errorf("unexpected resource telemetry: %+v", res)
+ }
+}
+
+// Verifies one telemetry entry per Standalone CR.
+func TestHandleStandalones_MultipleCRs(t *testing.T) {
+ cr1 := &enterpriseApi.Standalone{
+  ObjectMeta: metav1.ObjectMeta{Name: "s1"},
+  Spec: enterpriseApi.StandaloneSpec{
+   CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+    Spec: enterpriseApi.Spec{
+     Resources: corev1.ResourceRequirements{
+      Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")},
+      Limits:   corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2")},
+     },
+    },
+   },
+  },
+ }
+ cr2 := &enterpriseApi.Standalone{
+  ObjectMeta: metav1.ObjectMeta{Name: "s2"},
+  Spec: enterpriseApi.StandaloneSpec{
+   CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+    Spec: enterpriseApi.Spec{
+     Resources: corev1.ResourceRequirements{
+      Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("3")},
+      Limits:   corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("4")},
+     },
+    },
+   },
+  },
+ }
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"Standalone": {cr1, cr2}}}
+ ctx := context.TODO()
+ data, _, err := handleStandalones(ctx, mockClient)
+ if err != nil {
+  t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 2 {
+  t.Errorf("expected two telemetry entries")
+ }
+}
+
+// errorStandaloneClient duplicates errorClient (List always fails).
+type errorStandaloneClient struct{ *FakeListClient }
+
+func (c *errorStandaloneClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
+ return errors.New("fail list")
+}
+
+// Verifies a List failure is propagated as-is by handleStandalones.
+func TestHandleStandalones_ListError(t *testing.T) {
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"Standalone": {}}}
+ ctx := context.TODO()
+ errClient := &errorStandaloneClient{mockClient}
+ data, _, err := handleStandalones(ctx, errClient)
+ if err == nil || err.Error() != "fail list" {
+  t.Errorf("expected error 'fail list', got %v", err)
+ }
+ if data != nil {
+  t.Errorf("expected nil, nil when error")
+ }
+}
+
+// Edge case: a CR with an entirely empty ResourceRequirements. Per the
+// NilMaps test, collectResourceTelData fills in non-empty defaults, so the
+// first branch below (any value non-empty) is the acceptable path.
+func TestHandleStandalones_EdgeResourceSpecs(t *testing.T) {
+ cr := &enterpriseApi.Standalone{ObjectMeta: metav1.ObjectMeta{Name: "s1"}, Spec: enterpriseApi.StandaloneSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{}}}}}
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"Standalone": {cr}}}
+ ctx := context.TODO()
+ data, _, err := handleStandalones(ctx, mockClient)
+ if err != nil {
+  t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 1 {
+  t.Errorf("expected one telemetry entry for s1")
+ }
+ res, ok := m["s1"].(map[string]string)
+ if !ok {
+  t.Errorf("expected resource telemetry for s1")
+ }
+ if res[cpuRequestKey] != "" || res[memoryRequestKey] != "" || res[cpuLimitKey] != "" || res[memoryLimitKey] != "" {
+  // Acceptable: defaults (or explicit values) are present.
+ } else {
+  t.Errorf("unexpected resource telemetry for edge case: %+v", res)
+ }
+}
+
+// --- Tests for handleLicenseManagers ---
+
+// Verifies handleLicenseManagers yields nil data and nil error with no CRs.
+func TestHandleLicenseManagers_NoCRs(t *testing.T) {
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"LicenseManager": {}}}
+ ctx := context.TODO()
+ data, _, err := handleLicenseManagers(ctx, mockClient)
+ if data != nil || err != nil {
+  t.Errorf("expected nil, nil, nil when no LicenseManager CRs exist")
+ }
+}
+
+// Verifies resource telemetry is collected for a single LicenseManager CR.
+func TestHandleLicenseManagers_OneCR(t *testing.T) {
+ cr := &enterpriseApi.LicenseManager{
+  ObjectMeta: metav1.ObjectMeta{Name: "lm1"},
+  Spec: enterpriseApi.LicenseManagerSpec{
+   CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+    Spec: enterpriseApi.Spec{
+     Resources: corev1.ResourceRequirements{
+      Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("2Gi")},
+      Limits:   corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("4Gi")},
+     },
+    },
+   },
+  },
+ }
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"LicenseManager": {cr}}}
+ ctx := context.TODO()
+ data, _, err := handleLicenseManagers(ctx, mockClient)
+ if err != nil {
+  t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 1 {
+  t.Errorf("expected one telemetry entry for lm1")
+ }
+ res, ok := m["lm1"].(map[string]string)
+ if !ok {
+  t.Errorf("expected resource telemetry for lm1")
+ }
+ if res[cpuRequestKey] != "1" || res[memoryRequestKey] != "2Gi" || res[cpuLimitKey] != "2" || res[memoryLimitKey] != "4Gi" {
+  t.Errorf("unexpected resource telemetry: %+v", res)
+ }
+}
+
+// Verifies one telemetry entry per LicenseManager CR.
+func TestHandleLicenseManagers_MultipleCRs(t *testing.T) {
+ cr1 := &enterpriseApi.LicenseManager{ObjectMeta: metav1.ObjectMeta{Name: "lm1"}, Spec: enterpriseApi.LicenseManagerSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2")}}}}}}
+ cr2 := &enterpriseApi.LicenseManager{ObjectMeta: metav1.ObjectMeta{Name: "lm2"}, Spec: enterpriseApi.LicenseManagerSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("3")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("4")}}}}}}
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"LicenseManager": {cr1, cr2}}}
+ ctx := context.TODO()
+ data, _, err := handleLicenseManagers(ctx, mockClient)
+ if err != nil {
+  t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 2 {
+  t.Errorf("expected two telemetry entries")
+ }
+}
+
+// errorLicenseManagerClient duplicates errorClient (List always fails).
+type errorLicenseManagerClient struct{ *FakeListClient }
+
+func (c *errorLicenseManagerClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
+ return errors.New("fail list")
+}
+
+// Verifies a List failure is propagated as-is by handleLicenseManagers.
+func TestHandleLicenseManagers_ListError(t *testing.T) {
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"LicenseManager": {}}}
+ ctx := context.TODO()
+ errClient := &errorLicenseManagerClient{mockClient}
+ data, _, err := handleLicenseManagers(ctx, errClient)
+ if err == nil || err.Error() != "fail list" {
+  t.Errorf("expected error 'fail list', got %v", err)
+ }
+ if data != nil {
+  t.Errorf("expected nil, nil when error")
+ }
+}
+func TestHandleLicenseManagers_EdgeResourceSpecs(t *testing.T) {
+ cr := &enterpriseApi.LicenseManager{ObjectMeta: metav1.ObjectMeta{Name: "lm1"}, Spec: enterpriseApi.LicenseManagerSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{}}}}}
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"LicenseManager": {cr}}}
+ ctx := context.TODO()
+ data, _, err := handleLicenseManagers(ctx, mockClient)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 1 {
+ t.Errorf("expected one telemetry entry for lm1")
+ }
+ res, ok := m["lm1"].(map[string]string)
+ if !ok {
+ t.Errorf("expected resource telemetry for lm1")
+ }
+	// Error only when every telemetry value is empty (equivalent to the original check by De Morgan).
+	if res[cpuRequestKey] == "" && res[memoryRequestKey] == "" &&
+		res[cpuLimitKey] == "" && res[memoryLimitKey] == "" {
+		t.Errorf("unexpected resource telemetry for edge case: %+v", res)
+	}
+}
+
+// --- Tests for handleLicenseMasters ---
+func TestHandleLicenseMasters_NoCRs(t *testing.T) {
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"LicenseMaster": {}}}
+ ctx := context.TODO()
+ data, _, err := handleLicenseMasters(ctx, mockClient)
+ if data != nil || err != nil {
+ t.Errorf("expected nil, nil, nil when no LicenseMaster CRs exist")
+ }
+}
+func TestHandleLicenseMasters_OneCR(t *testing.T) {
+ cr := &enterpriseApiV3.LicenseMaster{
+ ObjectMeta: metav1.ObjectMeta{Name: "lm1"},
+ Spec: enterpriseApiV3.LicenseMasterSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ Spec: enterpriseApi.Spec{
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("2Gi")},
+ Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("4Gi")},
+ },
+ },
+ },
+ },
+ }
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"LicenseMaster": {cr}}}
+ ctx := context.TODO()
+ data, _, err := handleLicenseMasters(ctx, mockClient)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 1 {
+ t.Errorf("expected one telemetry entry for lm1")
+ }
+ res, ok := m["lm1"].(map[string]string)
+ if !ok {
+ t.Errorf("expected resource telemetry for lm1")
+ }
+ if res[cpuRequestKey] != "1" || res[memoryRequestKey] != "2Gi" || res[cpuLimitKey] != "2" || res[memoryLimitKey] != "4Gi" {
+ t.Errorf("unexpected resource telemetry: %+v", res)
+ }
+}
+func TestHandleLicenseMasters_MultipleCRs(t *testing.T) {
+ cr1 := &enterpriseApiV3.LicenseMaster{ObjectMeta: metav1.ObjectMeta{Name: "lm1"}, Spec: enterpriseApiV3.LicenseMasterSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2")}}}}}}
+ cr2 := &enterpriseApiV3.LicenseMaster{ObjectMeta: metav1.ObjectMeta{Name: "lm2"}, Spec: enterpriseApiV3.LicenseMasterSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("3")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("4")}}}}}}
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"LicenseMaster": {cr1, cr2}}}
+ ctx := context.TODO()
+ data, _, err := handleLicenseMasters(ctx, mockClient)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 2 {
+ t.Errorf("expected two telemetry entries")
+ }
+}
+
+type errorLicenseMasterClient struct{ *FakeListClient }
+
+func (c *errorLicenseMasterClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
+ return errors.New("fail list")
+}
+func TestHandleLicenseMasters_ListError(t *testing.T) {
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"LicenseMaster": {}}}
+ ctx := context.TODO()
+ errClient := &errorLicenseMasterClient{mockClient}
+ data, _, err := handleLicenseMasters(ctx, errClient)
+ if err == nil || err.Error() != "fail list" {
+ t.Errorf("expected error 'fail list', got %v", err)
+ }
+ if data != nil {
+ t.Errorf("expected nil, nil when error")
+ }
+}
+func TestHandleLicenseMasters_EdgeResourceSpecs(t *testing.T) {
+ cr := &enterpriseApiV3.LicenseMaster{ObjectMeta: metav1.ObjectMeta{Name: "lm1"}, Spec: enterpriseApiV3.LicenseMasterSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{}}}}}
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"LicenseMaster": {cr}}}
+ ctx := context.TODO()
+ data, _, err := handleLicenseMasters(ctx, mockClient)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 1 {
+ t.Errorf("expected one telemetry entry for lm1")
+ }
+ res, ok := m["lm1"].(map[string]string)
+ if !ok {
+ t.Errorf("expected resource telemetry for lm1")
+ }
+	// Error only when every telemetry value is empty (equivalent to the original check by De Morgan).
+	if res[cpuRequestKey] == "" && res[memoryRequestKey] == "" &&
+		res[cpuLimitKey] == "" && res[memoryLimitKey] == "" {
+		t.Errorf("unexpected resource telemetry for edge case: %+v", res)
+	}
+}
+
+// --- Tests for handleSearchHeadClusters ---
+func TestHandleSearchHeadClusters_NoCRs(t *testing.T) {
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"SearchHeadCluster": {}}}
+ ctx := context.TODO()
+ data, _, err := handleSearchHeadClusters(ctx, mockClient)
+ if data != nil || err != nil {
+ t.Errorf("expected nil, nil, nil when no SearchHeadCluster CRs exist")
+ }
+}
+func TestHandleSearchHeadClusters_OneCR(t *testing.T) {
+ cr := &enterpriseApi.SearchHeadCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "shc1"},
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ Spec: enterpriseApi.Spec{
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("2Gi")},
+ Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("4Gi")},
+ },
+ },
+ },
+ },
+ }
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"SearchHeadCluster": {cr}}}
+ ctx := context.TODO()
+ data, _, err := handleSearchHeadClusters(ctx, mockClient)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 1 {
+ t.Errorf("expected one telemetry entry for shc1")
+ }
+ res, ok := m["shc1"].(map[string]string)
+ if !ok {
+ t.Errorf("expected resource telemetry for shc1")
+ }
+ if res[cpuRequestKey] != "1" || res[memoryRequestKey] != "2Gi" || res[cpuLimitKey] != "2" || res[memoryLimitKey] != "4Gi" {
+ t.Errorf("unexpected resource telemetry: %+v", res)
+ }
+}
+func TestHandleSearchHeadClusters_MultipleCRs(t *testing.T) {
+ cr1 := &enterpriseApi.SearchHeadCluster{ObjectMeta: metav1.ObjectMeta{Name: "shc1"}, Spec: enterpriseApi.SearchHeadClusterSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2")}}}}}}
+ cr2 := &enterpriseApi.SearchHeadCluster{ObjectMeta: metav1.ObjectMeta{Name: "shc2"}, Spec: enterpriseApi.SearchHeadClusterSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("3")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("4")}}}}}}
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"SearchHeadCluster": {cr1, cr2}}}
+ ctx := context.TODO()
+ data, _, err := handleSearchHeadClusters(ctx, mockClient)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 2 {
+ t.Errorf("expected two telemetry entries")
+ }
+}
+
+type errorSearchHeadClusterClient struct{ *FakeListClient }
+
+func (c *errorSearchHeadClusterClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
+ return errors.New("fail list")
+}
+func TestHandleSearchHeadClusters_ListError(t *testing.T) {
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"SearchHeadCluster": {}}}
+ ctx := context.TODO()
+ errClient := &errorSearchHeadClusterClient{mockClient}
+ data, _, err := handleSearchHeadClusters(ctx, errClient)
+ if err == nil || err.Error() != "fail list" {
+ t.Errorf("expected error 'fail list', got %v", err)
+ }
+ if data != nil {
+ t.Errorf("expected nil, nil when error")
+ }
+}
+func TestHandleSearchHeadClusters_EdgeResourceSpecs(t *testing.T) {
+ cr := &enterpriseApi.SearchHeadCluster{ObjectMeta: metav1.ObjectMeta{Name: "shc1"}, Spec: enterpriseApi.SearchHeadClusterSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{}}}}}
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"SearchHeadCluster": {cr}}}
+ ctx := context.TODO()
+ data, _, err := handleSearchHeadClusters(ctx, mockClient)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 1 {
+ t.Errorf("expected one telemetry entry for shc1")
+ }
+ res, ok := m["shc1"].(map[string]string)
+ if !ok {
+ t.Errorf("expected resource telemetry for shc1")
+ }
+	// Error only when every telemetry value is empty (equivalent to the original check by De Morgan).
+	if res[cpuRequestKey] == "" && res[memoryRequestKey] == "" &&
+		res[cpuLimitKey] == "" && res[memoryLimitKey] == "" {
+		t.Errorf("unexpected resource telemetry for edge case: %+v", res)
+	}
+}
+
+// --- Tests for handleIndexerClusters ---
+func TestHandleIndexerClusters_NoCRs(t *testing.T) {
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"IndexerCluster": {}}}
+ ctx := context.TODO()
+ data, _, err := handleIndexerClusters(ctx, mockClient)
+ if data != nil || err != nil {
+ t.Errorf("expected nil, nil, nil when no IndexerCluster CRs exist")
+ }
+}
+func TestHandleIndexerClusters_OneCR(t *testing.T) {
+ cr := &enterpriseApi.IndexerCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "idx1"},
+ Spec: enterpriseApi.IndexerClusterSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ Spec: enterpriseApi.Spec{
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("2Gi")},
+ Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("4Gi")},
+ },
+ },
+ },
+ },
+ }
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"IndexerCluster": {cr}}}
+ ctx := context.TODO()
+ data, _, err := handleIndexerClusters(ctx, mockClient)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 1 {
+ t.Errorf("expected one telemetry entry for idx1")
+ }
+ res, ok := m["idx1"].(map[string]string)
+ if !ok {
+ t.Errorf("expected resource telemetry for idx1")
+ }
+ if res[cpuRequestKey] != "1" || res[memoryRequestKey] != "2Gi" || res[cpuLimitKey] != "2" || res[memoryLimitKey] != "4Gi" {
+ t.Errorf("unexpected resource telemetry: %+v", res)
+ }
+}
+func TestHandleIndexerClusters_MultipleCRs(t *testing.T) {
+ cr1 := &enterpriseApi.IndexerCluster{ObjectMeta: metav1.ObjectMeta{Name: "idx1"}, Spec: enterpriseApi.IndexerClusterSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2")}}}}}}
+ cr2 := &enterpriseApi.IndexerCluster{ObjectMeta: metav1.ObjectMeta{Name: "idx2"}, Spec: enterpriseApi.IndexerClusterSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("3")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("4")}}}}}}
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"IndexerCluster": {cr1, cr2}}}
+ ctx := context.TODO()
+ data, _, err := handleIndexerClusters(ctx, mockClient)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 2 {
+ t.Errorf("expected two telemetry entries")
+ }
+}
+
+type errorIndexerClusterClient struct{ *FakeListClient }
+
+func (c *errorIndexerClusterClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
+ return errors.New("fail list")
+}
+func TestHandleIndexerClusters_ListError(t *testing.T) {
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"IndexerCluster": {}}}
+ ctx := context.TODO()
+ errClient := &errorIndexerClusterClient{mockClient}
+ data, _, err := handleIndexerClusters(ctx, errClient)
+ if err == nil || err.Error() != "fail list" {
+ t.Errorf("expected error 'fail list', got %v", err)
+ }
+ if data != nil {
+ t.Errorf("expected nil, nil when error")
+ }
+}
+func TestHandleIndexerClusters_EdgeResourceSpecs(t *testing.T) {
+ cr := &enterpriseApi.IndexerCluster{ObjectMeta: metav1.ObjectMeta{Name: "idx1"}, Spec: enterpriseApi.IndexerClusterSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{}}}}}
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"IndexerCluster": {cr}}}
+ ctx := context.TODO()
+ data, _, err := handleIndexerClusters(ctx, mockClient)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 1 {
+ t.Errorf("expected one telemetry entry for idx1")
+ }
+ res, ok := m["idx1"].(map[string]string)
+ if !ok {
+ t.Errorf("expected resource telemetry for idx1")
+ }
+	// Error only when every telemetry value is empty (equivalent to the original check by De Morgan).
+	if res[cpuRequestKey] == "" && res[memoryRequestKey] == "" &&
+		res[cpuLimitKey] == "" && res[memoryLimitKey] == "" {
+		t.Errorf("unexpected resource telemetry for edge case: %+v", res)
+	}
+}
+
+// --- Tests for handleClusterManagers ---
+func TestHandleClusterManagers_NoCRs(t *testing.T) {
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"ClusterManager": {}}}
+ ctx := context.TODO()
+ data, _, err := handleClusterManagers(ctx, mockClient)
+ if data != nil || err != nil {
+ t.Errorf("expected nil, nil, nil when no ClusterManager CRs exist")
+ }
+}
+func TestHandleClusterManagers_OneCR(t *testing.T) {
+ cr := &enterpriseApi.ClusterManager{
+ ObjectMeta: metav1.ObjectMeta{Name: "cmgr1"},
+ Spec: enterpriseApi.ClusterManagerSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ Spec: enterpriseApi.Spec{
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("2Gi")},
+ Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("4Gi")},
+ },
+ },
+ },
+ },
+ }
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"ClusterManager": {cr}}}
+ ctx := context.TODO()
+ data, _, err := handleClusterManagers(ctx, mockClient)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 1 {
+ t.Errorf("expected one telemetry entry for cmgr1")
+ }
+ res, ok := m["cmgr1"].(map[string]string)
+ if !ok {
+ t.Errorf("expected resource telemetry for cmgr1")
+ }
+ if res[cpuRequestKey] != "1" || res[memoryRequestKey] != "2Gi" || res[cpuLimitKey] != "2" || res[memoryLimitKey] != "4Gi" {
+ t.Errorf("unexpected resource telemetry: %+v", res)
+ }
+}
+func TestHandleClusterManagers_MultipleCRs(t *testing.T) {
+ cr1 := &enterpriseApi.ClusterManager{ObjectMeta: metav1.ObjectMeta{Name: "cmgr1"}, Spec: enterpriseApi.ClusterManagerSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2")}}}}}}
+ cr2 := &enterpriseApi.ClusterManager{ObjectMeta: metav1.ObjectMeta{Name: "cmgr2"}, Spec: enterpriseApi.ClusterManagerSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("3")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("4")}}}}}}
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"ClusterManager": {cr1, cr2}}}
+ ctx := context.TODO()
+ data, _, err := handleClusterManagers(ctx, mockClient)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 2 {
+ t.Errorf("expected two telemetry entries")
+ }
+}
+
+type errorClusterManagerClient struct{ *FakeListClient }
+
+func (c *errorClusterManagerClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
+ return errors.New("fail list")
+}
+func TestHandleClusterManagers_ListError(t *testing.T) {
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"ClusterManager": {}}}
+ ctx := context.TODO()
+ errClient := &errorClusterManagerClient{mockClient}
+ data, _, err := handleClusterManagers(ctx, errClient)
+ if err == nil || err.Error() != "fail list" {
+ t.Errorf("expected error 'fail list', got %v", err)
+ }
+ if data != nil {
+ t.Errorf("expected nil, nil when error")
+ }
+}
+func TestHandleClusterManagers_EdgeResourceSpecs(t *testing.T) {
+ cr := &enterpriseApi.ClusterManager{ObjectMeta: metav1.ObjectMeta{Name: "cmgr1"}, Spec: enterpriseApi.ClusterManagerSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{}}}}}
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"ClusterManager": {cr}}}
+ ctx := context.TODO()
+ data, _, err := handleClusterManagers(ctx, mockClient)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 1 {
+ t.Errorf("expected one telemetry entry for cmgr1")
+ }
+ res, ok := m["cmgr1"].(map[string]string)
+ if !ok {
+ t.Errorf("expected resource telemetry for cmgr1")
+ }
+	// Error only when every telemetry value is empty (equivalent to the original check by De Morgan).
+	if res[cpuRequestKey] == "" && res[memoryRequestKey] == "" &&
+		res[cpuLimitKey] == "" && res[memoryLimitKey] == "" {
+		t.Errorf("unexpected resource telemetry for edge case: %+v", res)
+	}
+}
+
+// --- Tests for handleClusterMasters ---
+func TestHandleClusterMasters_NoCRs(t *testing.T) {
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"ClusterMaster": {}}}
+ ctx := context.TODO()
+ data, _, err := handleClusterMasters(ctx, mockClient)
+ if data != nil || err != nil {
+ t.Errorf("expected nil, nil, nil when no ClusterMaster CRs exist")
+ }
+}
+func TestHandleClusterMasters_OneCR(t *testing.T) {
+ cr := &enterpriseApiV3.ClusterMaster{
+ ObjectMeta: metav1.ObjectMeta{Name: "cmast1"},
+ Spec: enterpriseApiV3.ClusterMasterSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ Spec: enterpriseApi.Spec{
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("2Gi")},
+ Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("4Gi")},
+ },
+ },
+ },
+ },
+ }
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"ClusterMaster": {cr}}}
+ ctx := context.TODO()
+ data, _, err := handleClusterMasters(ctx, mockClient)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 1 {
+ t.Errorf("expected one telemetry entry for cmast1")
+ }
+ res, ok := m["cmast1"].(map[string]string)
+ if !ok {
+ t.Errorf("expected resource telemetry for cmast1")
+ }
+ if res[cpuRequestKey] != "1" || res[memoryRequestKey] != "2Gi" || res[cpuLimitKey] != "2" || res[memoryLimitKey] != "4Gi" {
+ t.Errorf("unexpected resource telemetry: %+v", res)
+ }
+}
+func TestHandleClusterMasters_MultipleCRs(t *testing.T) {
+ cr1 := &enterpriseApiV3.ClusterMaster{ObjectMeta: metav1.ObjectMeta{Name: "cmast1"}, Spec: enterpriseApiV3.ClusterMasterSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2")}}}}}}
+ cr2 := &enterpriseApiV3.ClusterMaster{ObjectMeta: metav1.ObjectMeta{Name: "cmast2"}, Spec: enterpriseApiV3.ClusterMasterSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("3")}, Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("4")}}}}}}
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"ClusterMaster": {cr1, cr2}}}
+ ctx := context.TODO()
+ data, _, err := handleClusterMasters(ctx, mockClient)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 2 {
+ t.Errorf("expected two telemetry entries")
+ }
+}
+
+type errorClusterMasterClient struct{ *FakeListClient }
+
+func (c *errorClusterMasterClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
+ return errors.New("fail list")
+}
+func TestHandleClusterMasters_ListError(t *testing.T) {
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"ClusterMaster": {}}}
+ ctx := context.TODO()
+ errClient := &errorClusterMasterClient{mockClient}
+ data, _, err := handleClusterMasters(ctx, errClient)
+	if err == nil || err.Error() != "fail list" {
+		t.Errorf("expected error 'fail list', got %v", err)
+ }
+ if data != nil {
+ t.Errorf("expected nil, nil when error")
+ }
+}
+func TestHandleClusterMasters_EdgeResourceSpecs(t *testing.T) {
+ cr := &enterpriseApiV3.ClusterMaster{ObjectMeta: metav1.ObjectMeta{Name: "cmast1"}, Spec: enterpriseApiV3.ClusterMasterSpec{CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Spec: enterpriseApi.Spec{Resources: corev1.ResourceRequirements{}}}}}
+ mockClient := &FakeListClient{crs: map[string][]client.Object{"ClusterMaster": {cr}}}
+ ctx := context.TODO()
+ data, _, err := handleClusterMasters(ctx, mockClient)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ m, ok := data.(map[string]interface{})
+ if !ok || len(m) != 1 {
+ t.Errorf("expected one telemetry entry for cmast1")
+ }
+ res, ok := m["cmast1"].(map[string]string)
+ if !ok {
+ t.Errorf("expected resource telemetry for cmast1")
+ }
+	// Error only when every telemetry value is empty (equivalent to the original check by De Morgan).
+	if res[cpuRequestKey] == "" && res[memoryRequestKey] == "" &&
+		res[cpuLimitKey] == "" && res[memoryLimitKey] == "" {
+		t.Errorf("unexpected resource telemetry for edge case: %+v", res)
+	}
+}
diff --git a/pkg/splunk/enterprise/testdata/fixtures/statefulset_ingestor.json b/pkg/splunk/enterprise/testdata/fixtures/statefulset_ingestor.json
new file mode 100644
index 000000000..933f26f73
--- /dev/null
+++ b/pkg/splunk/enterprise/testdata/fixtures/statefulset_ingestor.json
@@ -0,0 +1,207 @@
+{
+ "kind": "StatefulSet",
+ "apiVersion": "apps/v1",
+ "metadata": {
+ "name": "splunk-test-ingestor",
+ "namespace": "test",
+ "creationTimestamp": null,
+ "labels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor"
+ },
+ "ownerReferences": [
+ {
+ "apiVersion": "",
+ "kind": "IngestorCluster",
+ "name": "test",
+ "uid": "",
+ "controller": true
+ }
+ ]
+ },
+ "spec": {
+ "replicas": 1,
+ "selector": {
+ "matchLabels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor"
+ }
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor"
+ },
+ "annotations": {
+ "traffic.sidecar.istio.io/excludeOutboundPorts": "8089,8191,9997",
+ "traffic.sidecar.istio.io/includeInboundPorts": "8000,8088"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "splunk-test-probe-configmap",
+ "configMap": {
+ "name": "splunk-test-probe-configmap",
+ "defaultMode": 365
+ }
+ },
+ {
+ "name": "mnt-splunk-secrets",
+ "secret": {
+ "secretName": "splunk-test-ingestor-secret-v1",
+ "defaultMode": 420
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "splunk",
+ "image": "splunk/splunk",
+ "ports": [
+ { "name": "http-splunkweb", "containerPort": 8000, "protocol": "TCP" },
+ { "name": "http-hec", "containerPort": 8088, "protocol": "TCP" },
+ { "name": "https-splunkd", "containerPort": 8089, "protocol": "TCP" },
+ { "name": "tcp-s2s", "containerPort": 9997, "protocol": "TCP" },
+ { "name": "user-defined", "containerPort": 32000, "protocol": "UDP" }
+ ],
+ "env": [
+ { "name": "SPLUNK_HOME", "value": "/opt/splunk" },
+ { "name": "SPLUNK_START_ARGS", "value": "--accept-license" },
+ { "name": "SPLUNK_DEFAULTS_URL", "value": "/mnt/splunk-secrets/default.yml" },
+ { "name": "SPLUNK_HOME_OWNERSHIP_ENFORCEMENT", "value": "false" },
+ { "name": "SPLUNK_ROLE", "value": "splunk_ingestor" },
+ { "name": "SPLUNK_DECLARATIVE_ADMIN_PASSWORD", "value": "true" },
+ { "name": "SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH", "value": "/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh" },
+ { "name": "SPLUNK_GENERAL_TERMS", "value": "--accept-sgt-current-at-splunk-com" },
+ { "name": "SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH", "value": "true" }
+ ],
+ "resources": {
+ "limits": { "cpu": "4", "memory": "8Gi" },
+ "requests": { "cpu": "100m", "memory": "512Mi" }
+ },
+ "volumeMounts": [
+ { "name": "pvc-etc", "mountPath": "/opt/splunk/etc" },
+ { "name": "pvc-var", "mountPath": "/opt/splunk/var" },
+ { "name": "splunk-test-probe-configmap", "mountPath": "/mnt/probes" },
+ { "name": "mnt-splunk-secrets", "mountPath": "/mnt/splunk-secrets" }
+ ],
+ "livenessProbe": {
+ "exec": { "command": ["/mnt/probes/livenessProbe.sh"] },
+ "initialDelaySeconds": 30,
+ "timeoutSeconds": 30,
+ "periodSeconds": 30,
+ "failureThreshold": 3
+ },
+ "readinessProbe": {
+ "exec": { "command": ["/mnt/probes/readinessProbe.sh"] },
+ "initialDelaySeconds": 10,
+ "timeoutSeconds": 5,
+ "periodSeconds": 5,
+ "failureThreshold": 3
+ },
+ "startupProbe": {
+ "exec": { "command": ["/mnt/probes/startupProbe.sh"] },
+ "initialDelaySeconds": 40,
+ "timeoutSeconds": 30,
+ "periodSeconds": 30,
+ "failureThreshold": 12
+ },
+ "imagePullPolicy": "IfNotPresent",
+ "securityContext": {
+ "capabilities": { "add": ["NET_BIND_SERVICE"], "drop": ["ALL"] },
+ "privileged": false,
+ "runAsUser": 41812,
+ "runAsNonRoot": true,
+ "allowPrivilegeEscalation": false,
+ "seccompProfile": { "type": "RuntimeDefault" }
+ }
+ }
+ ],
+ "securityContext": {
+ "runAsUser": 41812,
+ "runAsNonRoot": true,
+ "fsGroup": 41812,
+ "fsGroupChangePolicy": "OnRootMismatch"
+ },
+ "affinity": {
+ "podAntiAffinity": {
+ "preferredDuringSchedulingIgnoredDuringExecution": [
+ {
+ "weight": 100,
+ "podAffinityTerm": {
+ "labelSelector": {
+ "matchExpressions": [
+ {
+ "key": "app.kubernetes.io/instance",
+ "operator": "In",
+ "values": ["splunk-test-ingestor"]
+ }
+ ]
+ },
+ "topologyKey": "kubernetes.io/hostname"
+ }
+ }
+ ]
+ }
+ },
+ "schedulerName": "default-scheduler"
+ }
+ },
+ "volumeClaimTemplates": [
+ {
+ "metadata": {
+ "name": "pvc-etc",
+ "namespace": "test",
+ "creationTimestamp": null,
+ "labels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor"
+ }
+ },
+ "spec": {
+ "accessModes": ["ReadWriteOnce"],
+ "resources": { "requests": { "storage": "10Gi" } }
+ },
+ "status": {}
+ },
+ {
+ "metadata": {
+ "name": "pvc-var",
+ "namespace": "test",
+ "creationTimestamp": null,
+ "labels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor"
+ }
+ },
+ "spec": {
+ "accessModes": ["ReadWriteOnce"],
+ "resources": { "requests": { "storage": "100Gi" } }
+ },
+ "status": {}
+ }
+ ],
+ "serviceName": "splunk-test-ingestor-headless",
+ "podManagementPolicy": "Parallel",
+ "updateStrategy": { "type": "OnDelete" }
+ },
+ "status": { "replicas": 0, "availableReplicas": 0 }
+}
\ No newline at end of file
diff --git a/pkg/splunk/enterprise/testdata/fixtures/statefulset_ingestor_with_extraenv.json b/pkg/splunk/enterprise/testdata/fixtures/statefulset_ingestor_with_extraenv.json
new file mode 100644
index 000000000..581598ecf
--- /dev/null
+++ b/pkg/splunk/enterprise/testdata/fixtures/statefulset_ingestor_with_extraenv.json
@@ -0,0 +1,209 @@
+{
+ "kind": "StatefulSet",
+ "apiVersion": "apps/v1",
+ "metadata": {
+ "name": "splunk-test-ingestor",
+ "namespace": "test",
+ "creationTimestamp": null,
+ "labels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor"
+ },
+ "ownerReferences": [
+ {
+ "apiVersion": "",
+ "kind": "IngestorCluster",
+ "name": "test",
+ "uid": "",
+ "controller": true
+ }
+ ]
+ },
+ "spec": {
+ "replicas": 1,
+ "selector": {
+ "matchLabels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor"
+ }
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor"
+ },
+ "annotations": {
+ "traffic.sidecar.istio.io/excludeOutboundPorts": "8089,8191,9997",
+ "traffic.sidecar.istio.io/includeInboundPorts": "8000,8088"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "splunk-test-probe-configmap",
+ "configMap": {
+ "name": "splunk-test-probe-configmap",
+ "defaultMode": 365
+ }
+ },
+ {
+ "name": "mnt-splunk-secrets",
+ "secret": {
+ "secretName": "splunk-test-ingestor-secret-v1",
+ "defaultMode": 420
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "splunk",
+ "image": "splunk/splunk",
+ "ports": [
+ { "name": "http-splunkweb", "containerPort": 8000, "protocol": "TCP" },
+ { "name": "http-hec", "containerPort": 8088, "protocol": "TCP" },
+ { "name": "https-splunkd", "containerPort": 8089, "protocol": "TCP" },
+ { "name": "tcp-s2s", "containerPort": 9997, "protocol": "TCP" },
+ { "name": "user-defined", "containerPort": 32000, "protocol": "UDP" }
+ ],
+ "env": [
+ { "name": "TEST_ENV_VAR", "value": "test_value" },
+ { "name": "SPLUNK_HOME", "value": "/opt/splunk" },
+ { "name": "SPLUNK_START_ARGS", "value": "--accept-license" },
+ { "name": "SPLUNK_DEFAULTS_URL", "value": "/mnt/splunk-secrets/default.yml" },
+ { "name": "SPLUNK_HOME_OWNERSHIP_ENFORCEMENT", "value": "false" },
+ { "name": "SPLUNK_ROLE", "value": "splunk_ingestor" },
+ { "name": "SPLUNK_DECLARATIVE_ADMIN_PASSWORD", "value": "true" },
+ { "name": "SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH", "value": "/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh" },
+ { "name": "SPLUNK_GENERAL_TERMS", "value": "--accept-sgt-current-at-splunk-com" },
+ { "name": "SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH", "value": "true" }
+ ],
+ "resources": {
+ "limits": { "cpu": "4", "memory": "8Gi" },
+ "requests": { "cpu": "100m", "memory": "512Mi" }
+ },
+ "volumeMounts": [
+ { "name": "pvc-etc", "mountPath": "/opt/splunk/etc" },
+ { "name": "pvc-var", "mountPath": "/opt/splunk/var" },
+ { "name": "splunk-test-probe-configmap", "mountPath": "/mnt/probes" },
+ { "name": "mnt-splunk-secrets", "mountPath": "/mnt/splunk-secrets" }
+ ],
+ "livenessProbe": {
+ "exec": { "command": ["/mnt/probes/livenessProbe.sh"] },
+ "initialDelaySeconds": 30,
+ "timeoutSeconds": 30,
+ "periodSeconds": 30,
+ "failureThreshold": 3
+ },
+ "readinessProbe": {
+ "exec": { "command": ["/mnt/probes/readinessProbe.sh"] },
+ "initialDelaySeconds": 10,
+ "timeoutSeconds": 5,
+ "periodSeconds": 5,
+ "failureThreshold": 3
+ },
+ "startupProbe": {
+ "exec": { "command": ["/mnt/probes/startupProbe.sh"] },
+ "initialDelaySeconds": 40,
+ "timeoutSeconds": 30,
+ "periodSeconds": 30,
+ "failureThreshold": 12
+ },
+ "imagePullPolicy": "IfNotPresent",
+ "securityContext": {
+ "capabilities": { "add": ["NET_BIND_SERVICE"], "drop": ["ALL"] },
+ "privileged": false,
+ "runAsUser": 41812,
+ "runAsNonRoot": true,
+ "allowPrivilegeEscalation": false,
+ "seccompProfile": { "type": "RuntimeDefault" }
+ }
+ }
+ ],
+ "serviceAccountName": "defaults",
+ "securityContext": {
+ "runAsUser": 41812,
+ "runAsNonRoot": true,
+ "fsGroup": 41812,
+ "fsGroupChangePolicy": "OnRootMismatch"
+ },
+ "affinity": {
+ "podAntiAffinity": {
+ "preferredDuringSchedulingIgnoredDuringExecution": [
+ {
+ "weight": 100,
+ "podAffinityTerm": {
+ "labelSelector": {
+ "matchExpressions": [
+ {
+ "key": "app.kubernetes.io/instance",
+ "operator": "In",
+ "values": ["splunk-test-ingestor"]
+ }
+ ]
+ },
+ "topologyKey": "kubernetes.io/hostname"
+ }
+ }
+ ]
+ }
+ },
+ "schedulerName": "default-scheduler"
+ }
+ },
+ "volumeClaimTemplates": [
+ {
+ "metadata": {
+ "name": "pvc-etc",
+ "namespace": "test",
+ "creationTimestamp": null,
+ "labels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor"
+ }
+ },
+ "spec": {
+ "accessModes": ["ReadWriteOnce"],
+ "resources": { "requests": { "storage": "10Gi" } }
+ },
+ "status": {}
+ },
+ {
+ "metadata": {
+ "name": "pvc-var",
+ "namespace": "test",
+ "creationTimestamp": null,
+ "labels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor"
+ }
+ },
+ "spec": {
+ "accessModes": ["ReadWriteOnce"],
+ "resources": { "requests": { "storage": "100Gi" } }
+ },
+ "status": {}
+ }
+ ],
+ "serviceName": "splunk-test-ingestor-headless",
+ "podManagementPolicy": "Parallel",
+ "updateStrategy": { "type": "OnDelete" }
+ },
+ "status": { "replicas": 0, "availableReplicas": 0 }
+}
diff --git a/pkg/splunk/enterprise/testdata/fixtures/statefulset_ingestor_with_labels.json b/pkg/splunk/enterprise/testdata/fixtures/statefulset_ingestor_with_labels.json
new file mode 100644
index 000000000..9a35ffab7
--- /dev/null
+++ b/pkg/splunk/enterprise/testdata/fixtures/statefulset_ingestor_with_labels.json
@@ -0,0 +1,213 @@
+{
+ "kind": "StatefulSet",
+ "apiVersion": "apps/v1",
+ "metadata": {
+ "name": "splunk-test-ingestor",
+ "namespace": "test",
+ "creationTimestamp": null,
+ "labels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor",
+ "app.kubernetes.io/test-extra-label": "test-extra-label-value"
+ },
+ "ownerReferences": [
+ {
+ "apiVersion": "",
+ "kind": "IngestorCluster",
+ "name": "test",
+ "uid": "",
+ "controller": true
+ }
+ ]
+ },
+ "spec": {
+ "replicas": 1,
+ "selector": {
+ "matchLabels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor"
+ }
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor",
+ "app.kubernetes.io/test-extra-label": "test-extra-label-value"
+ },
+ "annotations": {
+ "traffic.sidecar.istio.io/excludeOutboundPorts": "8089,8191,9997",
+ "traffic.sidecar.istio.io/includeInboundPorts": "8000,8088"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "splunk-test-probe-configmap",
+ "configMap": {
+ "name": "splunk-test-probe-configmap",
+ "defaultMode": 365
+ }
+ },
+ {
+ "name": "mnt-splunk-secrets",
+ "secret": {
+ "secretName": "splunk-test-ingestor-secret-v1",
+ "defaultMode": 420
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "splunk",
+ "image": "splunk/splunk",
+ "ports": [
+ { "name": "http-splunkweb", "containerPort": 8000, "protocol": "TCP" },
+ { "name": "http-hec", "containerPort": 8088, "protocol": "TCP" },
+ { "name": "https-splunkd", "containerPort": 8089, "protocol": "TCP" },
+ { "name": "tcp-s2s", "containerPort": 9997, "protocol": "TCP" },
+ { "name": "user-defined", "containerPort": 32000, "protocol": "UDP" }
+ ],
+ "env": [
+ { "name": "TEST_ENV_VAR", "value": "test_value" },
+ { "name": "SPLUNK_HOME", "value": "/opt/splunk" },
+ { "name": "SPLUNK_START_ARGS", "value": "--accept-license" },
+ { "name": "SPLUNK_DEFAULTS_URL", "value": "/mnt/splunk-secrets/default.yml" },
+ { "name": "SPLUNK_HOME_OWNERSHIP_ENFORCEMENT", "value": "false" },
+ { "name": "SPLUNK_ROLE", "value": "splunk_ingestor" },
+ { "name": "SPLUNK_DECLARATIVE_ADMIN_PASSWORD", "value": "true" },
+ { "name": "SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH", "value": "/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh" },
+ { "name": "SPLUNK_GENERAL_TERMS", "value": "--accept-sgt-current-at-splunk-com" },
+ { "name": "SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH", "value": "true" }
+ ],
+ "resources": {
+ "limits": { "cpu": "4", "memory": "8Gi" },
+ "requests": { "cpu": "100m", "memory": "512Mi" }
+ },
+ "volumeMounts": [
+ { "name": "pvc-etc", "mountPath": "/opt/splunk/etc" },
+ { "name": "pvc-var", "mountPath": "/opt/splunk/var" },
+ { "name": "splunk-test-probe-configmap", "mountPath": "/mnt/probes" },
+ { "name": "mnt-splunk-secrets", "mountPath": "/mnt/splunk-secrets" }
+ ],
+ "livenessProbe": {
+ "exec": { "command": ["/mnt/probes/livenessProbe.sh"] },
+ "initialDelaySeconds": 30,
+ "timeoutSeconds": 30,
+ "periodSeconds": 30,
+ "failureThreshold": 3
+ },
+ "readinessProbe": {
+ "exec": { "command": ["/mnt/probes/readinessProbe.sh"] },
+ "initialDelaySeconds": 10,
+ "timeoutSeconds": 5,
+ "periodSeconds": 5,
+ "failureThreshold": 3
+ },
+ "startupProbe": {
+ "exec": { "command": ["/mnt/probes/startupProbe.sh"] },
+ "initialDelaySeconds": 40,
+ "timeoutSeconds": 30,
+ "periodSeconds": 30,
+ "failureThreshold": 12
+ },
+ "imagePullPolicy": "IfNotPresent",
+ "securityContext": {
+ "capabilities": { "add": ["NET_BIND_SERVICE"], "drop": ["ALL"] },
+ "privileged": false,
+ "runAsUser": 41812,
+ "runAsNonRoot": true,
+ "allowPrivilegeEscalation": false,
+ "seccompProfile": { "type": "RuntimeDefault" }
+ }
+ }
+ ],
+ "serviceAccountName": "defaults",
+ "securityContext": {
+ "runAsUser": 41812,
+ "runAsNonRoot": true,
+ "fsGroup": 41812,
+ "fsGroupChangePolicy": "OnRootMismatch"
+ },
+ "affinity": {
+ "podAntiAffinity": {
+ "preferredDuringSchedulingIgnoredDuringExecution": [
+ {
+ "weight": 100,
+ "podAffinityTerm": {
+ "labelSelector": {
+ "matchExpressions": [
+ {
+ "key": "app.kubernetes.io/instance",
+ "operator": "In",
+ "values": ["splunk-test-ingestor"]
+ }
+ ]
+ },
+ "topologyKey": "kubernetes.io/hostname"
+ }
+ }
+ ]
+ }
+ },
+ "schedulerName": "default-scheduler"
+ }
+ },
+ "volumeClaimTemplates": [
+ {
+ "metadata": {
+ "name": "pvc-etc",
+ "namespace": "test",
+ "creationTimestamp": null,
+ "labels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor",
+ "app.kubernetes.io/test-extra-label": "test-extra-label-value"
+ }
+ },
+ "spec": {
+ "accessModes": ["ReadWriteOnce"],
+ "resources": { "requests": { "storage": "10Gi" } }
+ },
+ "status": {}
+ },
+ {
+ "metadata": {
+ "name": "pvc-var",
+ "namespace": "test",
+ "creationTimestamp": null,
+ "labels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor",
+ "app.kubernetes.io/test-extra-label": "test-extra-label-value"
+ }
+ },
+ "spec": {
+ "accessModes": ["ReadWriteOnce"],
+ "resources": { "requests": { "storage": "100Gi" } }
+ },
+ "status": {}
+ }
+ ],
+ "serviceName": "splunk-test-ingestor-headless",
+ "podManagementPolicy": "Parallel",
+ "updateStrategy": { "type": "OnDelete" }
+ },
+ "status": { "replicas": 0, "availableReplicas": 0 }
+}
diff --git a/pkg/splunk/enterprise/testdata/fixtures/statefulset_ingestor_with_serviceaccount.json b/pkg/splunk/enterprise/testdata/fixtures/statefulset_ingestor_with_serviceaccount.json
new file mode 100644
index 000000000..eb261195d
--- /dev/null
+++ b/pkg/splunk/enterprise/testdata/fixtures/statefulset_ingestor_with_serviceaccount.json
@@ -0,0 +1,208 @@
+{
+ "kind": "StatefulSet",
+ "apiVersion": "apps/v1",
+ "metadata": {
+ "name": "splunk-test-ingestor",
+ "namespace": "test",
+ "creationTimestamp": null,
+ "labels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor"
+ },
+ "ownerReferences": [
+ {
+ "apiVersion": "",
+ "kind": "IngestorCluster",
+ "name": "test",
+ "uid": "",
+ "controller": true
+ }
+ ]
+ },
+ "spec": {
+ "replicas": 1,
+ "selector": {
+ "matchLabels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor"
+ }
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor"
+ },
+ "annotations": {
+ "traffic.sidecar.istio.io/excludeOutboundPorts": "8089,8191,9997",
+ "traffic.sidecar.istio.io/includeInboundPorts": "8000,8088"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "splunk-test-probe-configmap",
+ "configMap": {
+ "name": "splunk-test-probe-configmap",
+ "defaultMode": 365
+ }
+ },
+ {
+ "name": "mnt-splunk-secrets",
+ "secret": {
+ "secretName": "splunk-test-ingestor-secret-v1",
+ "defaultMode": 420
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "splunk",
+ "image": "splunk/splunk",
+ "ports": [
+ { "name": "http-splunkweb", "containerPort": 8000, "protocol": "TCP" },
+ { "name": "http-hec", "containerPort": 8088, "protocol": "TCP" },
+ { "name": "https-splunkd", "containerPort": 8089, "protocol": "TCP" },
+ { "name": "tcp-s2s", "containerPort": 9997, "protocol": "TCP" },
+ { "name": "user-defined", "containerPort": 32000, "protocol": "UDP" }
+ ],
+ "env": [
+ { "name": "SPLUNK_HOME", "value": "/opt/splunk" },
+ { "name": "SPLUNK_START_ARGS", "value": "--accept-license" },
+ { "name": "SPLUNK_DEFAULTS_URL", "value": "/mnt/splunk-secrets/default.yml" },
+ { "name": "SPLUNK_HOME_OWNERSHIP_ENFORCEMENT", "value": "false" },
+ { "name": "SPLUNK_ROLE", "value": "splunk_ingestor" },
+ { "name": "SPLUNK_DECLARATIVE_ADMIN_PASSWORD", "value": "true" },
+ { "name": "SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH", "value": "/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh" },
+ { "name": "SPLUNK_GENERAL_TERMS", "value": "--accept-sgt-current-at-splunk-com" },
+ { "name": "SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH", "value": "true" }
+ ],
+ "resources": {
+ "limits": { "cpu": "4", "memory": "8Gi" },
+ "requests": { "cpu": "100m", "memory": "512Mi" }
+ },
+ "volumeMounts": [
+ { "name": "pvc-etc", "mountPath": "/opt/splunk/etc" },
+ { "name": "pvc-var", "mountPath": "/opt/splunk/var" },
+ { "name": "splunk-test-probe-configmap", "mountPath": "/mnt/probes" },
+ { "name": "mnt-splunk-secrets", "mountPath": "/mnt/splunk-secrets" }
+ ],
+ "livenessProbe": {
+ "exec": { "command": ["/mnt/probes/livenessProbe.sh"] },
+ "initialDelaySeconds": 30,
+ "timeoutSeconds": 30,
+ "periodSeconds": 30,
+ "failureThreshold": 3
+ },
+ "readinessProbe": {
+ "exec": { "command": ["/mnt/probes/readinessProbe.sh"] },
+ "initialDelaySeconds": 10,
+ "timeoutSeconds": 5,
+ "periodSeconds": 5,
+ "failureThreshold": 3
+ },
+ "startupProbe": {
+ "exec": { "command": ["/mnt/probes/startupProbe.sh"] },
+ "initialDelaySeconds": 40,
+ "timeoutSeconds": 30,
+ "periodSeconds": 30,
+ "failureThreshold": 12
+ },
+ "imagePullPolicy": "IfNotPresent",
+ "securityContext": {
+ "capabilities": { "add": ["NET_BIND_SERVICE"], "drop": ["ALL"] },
+ "privileged": false,
+ "runAsUser": 41812,
+ "runAsNonRoot": true,
+ "allowPrivilegeEscalation": false,
+ "seccompProfile": { "type": "RuntimeDefault" }
+ }
+ }
+ ],
+ "serviceAccountName": "defaults",
+ "securityContext": {
+ "runAsUser": 41812,
+ "runAsNonRoot": true,
+ "fsGroup": 41812,
+ "fsGroupChangePolicy": "OnRootMismatch"
+ },
+ "affinity": {
+ "podAntiAffinity": {
+ "preferredDuringSchedulingIgnoredDuringExecution": [
+ {
+ "weight": 100,
+ "podAffinityTerm": {
+ "labelSelector": {
+ "matchExpressions": [
+ {
+ "key": "app.kubernetes.io/instance",
+ "operator": "In",
+ "values": ["splunk-test-ingestor"]
+ }
+ ]
+ },
+ "topologyKey": "kubernetes.io/hostname"
+ }
+ }
+ ]
+ }
+ },
+ "schedulerName": "default-scheduler"
+ }
+ },
+ "volumeClaimTemplates": [
+ {
+ "metadata": {
+ "name": "pvc-etc",
+ "namespace": "test",
+ "creationTimestamp": null,
+ "labels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor"
+ }
+ },
+ "spec": {
+ "accessModes": ["ReadWriteOnce"],
+ "resources": { "requests": { "storage": "10Gi" } }
+ },
+ "status": {}
+ },
+ {
+ "metadata": {
+ "name": "pvc-var",
+ "namespace": "test",
+ "creationTimestamp": null,
+ "labels": {
+ "app.kubernetes.io/component": "ingestor",
+ "app.kubernetes.io/instance": "splunk-test-ingestor",
+ "app.kubernetes.io/managed-by": "splunk-operator",
+ "app.kubernetes.io/name": "ingestor",
+ "app.kubernetes.io/part-of": "splunk-test-ingestor"
+ }
+ },
+ "spec": {
+ "accessModes": ["ReadWriteOnce"],
+ "resources": { "requests": { "storage": "100Gi" } }
+ },
+ "status": {}
+ }
+ ],
+ "serviceName": "splunk-test-ingestor-headless",
+ "podManagementPolicy": "Parallel",
+ "updateStrategy": { "type": "OnDelete" }
+ },
+ "status": { "replicas": 0, "availableReplicas": 0 }
+}
diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go
index 7b34c5eeb..8d0c2dd9d 100644
--- a/pkg/splunk/enterprise/types.go
+++ b/pkg/splunk/enterprise/types.go
@@ -60,6 +60,15 @@ const (
// SplunkIndexer may be a standalone or clustered indexer peer
SplunkIndexer InstanceType = "indexer"
+ // SplunkIngestor may be a standalone or clustered ingestion peer
+ SplunkIngestor InstanceType = "ingestor"
+
+ // SplunkQueue is the queue instance
+ SplunkQueue InstanceType = "queue"
+
+ // SplunkObjectStorage is the object storage instance
+ SplunkObjectStorage InstanceType = "object-storage"
+
// SplunkDeployer is an instance that distributes baseline configurations and apps to search head cluster members
SplunkDeployer InstanceType = "deployer"
@@ -129,6 +138,10 @@ type PipelineWorker struct {
// indicates a fan out worker
fanOut bool
+
+ // Optional injected pod exec client for testing (avoids real network I/O)
+ // If nil, runPodCopyWorker will create a real client
+ podExecClient splutil.PodExecClientImpl
}
// PipelinePhase represents one phase in the overall installation pipeline
@@ -244,6 +257,8 @@ func (instanceType InstanceType) ToRole() string {
role = splcommon.LicenseManagerRole
case SplunkMonitoringConsole:
role = "splunk_monitor"
+ case SplunkIngestor:
+ role = "splunk_ingestor"
}
return role
}
@@ -270,6 +285,8 @@ func (instanceType InstanceType) ToKind() string {
kind = "license-manager"
case SplunkMonitoringConsole:
kind = "monitoring-console"
+ case SplunkIngestor:
+ kind = "ingestor"
}
return kind
}
@@ -282,6 +299,12 @@ func KindToInstanceString(kind string) string {
return SplunkClusterMaster.ToString()
case "IndexerCluster":
return SplunkIndexer.ToString()
+ case "IngestorCluster":
+ return SplunkIngestor.ToString()
+ case "Queue":
+ return SplunkQueue.ToString()
+ case "ObjectStorage":
+ return SplunkObjectStorage.ToString()
case "LicenseManager":
return SplunkLicenseManager.ToString()
case "LicenseMaster":
diff --git a/pkg/splunk/enterprise/types_test.go b/pkg/splunk/enterprise/types_test.go
index edde72ca8..61a778532 100644
--- a/pkg/splunk/enterprise/types_test.go
+++ b/pkg/splunk/enterprise/types_test.go
@@ -39,6 +39,7 @@ func TestInstanceType(t *testing.T) {
SplunkLicenseMaster: splcommon.LicenseManagerRole,
SplunkLicenseManager: splcommon.LicenseManagerRole,
SplunkMonitoringConsole: "splunk_monitor",
+ SplunkIngestor: "splunk_ingestor",
}
for key, val := range instMap {
if key.ToRole() != val {
@@ -57,6 +58,7 @@ func TestInstanceType(t *testing.T) {
SplunkLicenseMaster: splcommon.LicenseManager,
SplunkLicenseManager: "license-manager",
SplunkMonitoringConsole: "monitoring-console",
+ SplunkIngestor: "ingestor",
}
for key, val := range instMap {
if key.ToKind() != val {
@@ -65,3 +67,29 @@ func TestInstanceType(t *testing.T) {
}
}
+
+func TestKindToInstanceString(t *testing.T) {
+ tests := []struct {
+ kind string
+ expected string
+ }{
+ {"ClusterManager", "cluster-manager"},
+ {"ClusterMaster", "cluster-master"},
+ {"IndexerCluster", "indexer"},
+ {"IngestorCluster", "ingestor"},
+ {"LicenseManager", "license-manager"},
+ {"LicenseMaster", "license-master"},
+ {"MonitoringConsole", "monitoring-console"},
+ {"SearchHeadCluster", "search-head"},
+ {"SearchHead", "search-head"},
+ {"Standalone", "standalone"},
+ {"UnknownKind", ""},
+ }
+
+ for _, tt := range tests {
+ got := KindToInstanceString(tt.kind)
+ if got != tt.expected {
+ t.Errorf("KindToInstanceString(%q) = %q; want %q", tt.kind, got, tt.expected)
+ }
+ }
+}
diff --git a/pkg/splunk/enterprise/upgrade.go b/pkg/splunk/enterprise/upgrade.go
index 5d50e8cec..a62fe4f00 100644
--- a/pkg/splunk/enterprise/upgrade.go
+++ b/pkg/splunk/enterprise/upgrade.go
@@ -10,7 +10,6 @@ import (
appsv1 "k8s.io/api/apps/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
- rclient "sigs.k8s.io/controller-runtime/pkg/client"
runtime "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
)
@@ -36,7 +35,10 @@ var GetClusterInfoCall = func(ctx context.Context, mgr *indexerClusterPodManager
func UpgradePathValidation(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, spec enterpriseApi.CommonSplunkSpec, mgr *indexerClusterPodManager) (bool, error) {
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("isClusterManagerReadyForUpgrade").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
- eventPublisher, _ := newK8EventPublisher(c, cr)
+
+ // Get event publisher from context
+ eventPublisher := GetEventPublisher(ctx, cr)
+
kind := cr.GroupVersionKind().Kind
scopedLog.Info("kind is set to ", "kind", kind)
// start from standalone first
@@ -141,6 +143,11 @@ ClusterManager:
return false, fmt.Errorf("cluster manager %s is not ready (phase: %s). IndexerCluster upgrade is waiting for ClusterManager to be ready", clusterManager.Name, clusterManager.Status.Phase)
}
if cmImage != spec.Image {
+ // Emit event when upgrade is blocked due to ClusterManager / IndexerCluster version mismatch
+ if eventPublisher != nil {
+ eventPublisher.Warning(ctx, "UpgradeBlockedVersionMismatch",
+ fmt.Sprintf("Upgrade blocked: ClusterManager version %s != IndexerCluster version %s. Upgrade ClusterManager first.", cmImage, spec.Image))
+ }
return false, fmt.Errorf("cluster manager %s image (%s) does not match IndexerCluster image (%s). Please upgrade ClusterManager and IndexerCluster together using the operator's RELATED_IMAGE_SPLUNK_ENTERPRISE or upgrade the ClusterManager first", clusterManager.Name, cmImage, spec.Image)
}
goto IndexerCluster
@@ -161,8 +168,8 @@ IndexerCluster:
}
// check if cluster is multisite
if clusterInfo.MultiSite == "true" {
- opts := []rclient.ListOption{
- rclient.InNamespace(cr.GetNamespace()),
+ opts := []runtime.ListOption{
+ runtime.InNamespace(cr.GetNamespace()),
}
indexerList, err := getIndexerClusterList(ctx, c, cr, opts)
if err != nil {
@@ -220,8 +227,8 @@ SearchHeadCluster:
// check if a search head cluster exists with the same ClusterManager instance attached
searchHeadClusterInstance := enterpriseApi.SearchHeadCluster{}
- opts := []rclient.ListOption{
- rclient.InNamespace(cr.GetNamespace()),
+ opts := []runtime.ListOption{
+ runtime.InNamespace(cr.GetNamespace()),
}
searchHeadList, err := getSearchHeadClusterList(ctx, c, cr, opts)
if err != nil {
diff --git a/pkg/splunk/enterprise/upgrade_test.go b/pkg/splunk/enterprise/upgrade_test.go
index b501527fd..fc960e61e 100644
--- a/pkg/splunk/enterprise/upgrade_test.go
+++ b/pkg/splunk/enterprise/upgrade_test.go
@@ -214,7 +214,7 @@ func TestUpgradePathValidation(t *testing.T) {
t.Errorf("applyMonitoringConsole should not have returned error; err=%v", err)
}
- _, err = ApplyClusterManager(ctx, client, &cm)
+ _, err = ApplyClusterManager(ctx, client, &cm, nil)
// license manager statefulset is not created
if err != nil && !k8serrors.IsNotFound(err) {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
@@ -268,7 +268,7 @@ func TestUpgradePathValidation(t *testing.T) {
t.Errorf("lm is not in ready state")
}
- _, err = ApplyClusterManager(ctx, client, &cm)
+ _, err = ApplyClusterManager(ctx, client, &cm, nil)
// lm statefulset should have been created by now, this should pass
if err != nil {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
@@ -279,7 +279,7 @@ func TestUpgradePathValidation(t *testing.T) {
updateStatefulSetsInTest(t, ctx, client, 1, fmt.Sprintf("splunk-%s-cluster-manager", cm.Name), cm.Namespace)
cm.Status.TelAppInstalled = true
// cluster manager is found and creat
- _, err = ApplyClusterManager(ctx, client, &cm)
+ _, err = ApplyClusterManager(ctx, client, &cm, nil)
// lm statefulset should have been created by now, this should pass
if err != nil {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
@@ -532,7 +532,7 @@ func TestUpgradePathValidation(t *testing.T) {
}
cm.Status.TelAppInstalled = true
- _, err = ApplyClusterManager(ctx, client, &cm)
+ _, err = ApplyClusterManager(ctx, client, &cm, nil)
if err != nil {
t.Errorf("applyClusterManager after update should not have returned error; err=%v", err)
}
@@ -585,13 +585,13 @@ func TestUpgradePathValidation(t *testing.T) {
}
cm.Status.TelAppInstalled = true
- _, err = ApplyClusterManager(ctx, client, &cm)
+ _, err = ApplyClusterManager(ctx, client, &cm, nil)
if err != nil {
t.Errorf("applyClusterManager after update should not have returned error; err=%v", err)
}
cm.Status.TelAppInstalled = true
- _, err = ApplyClusterManager(ctx, client, &cm)
+ _, err = ApplyClusterManager(ctx, client, &cm, nil)
if err != nil {
t.Errorf("applyClusterManager after update should not have returned error; err=%v", err)
}
@@ -625,6 +625,101 @@ func TestUpgradePathValidation(t *testing.T) {
}
+func TestUpgradeBlockedVersionMismatchEvent(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ sch := pkgruntime.NewScheme()
+ utilruntime.Must(clientgoscheme.AddToScheme(sch))
+ utilruntime.Must(corev1.AddToScheme(sch))
+ utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+ builder := fake.NewClientBuilder().
+ WithScheme(sch).
+ WithStatusSubresource(&enterpriseApi.ClusterManager{}).
+ WithStatusSubresource(&enterpriseApi.IndexerCluster{})
+
+ client := builder.Build()
+ ctx := context.TODO()
+
+ recorder := &mockEventRecorder{events: []mockEvent{}}
+ eventPublisher := &K8EventPublisher{recorder: recorder}
+
+ // Create ClusterManager with old image, phase Ready
+ cm := enterpriseApi.ClusterManager{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "test"},
+ Spec: enterpriseApi.ClusterManagerSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ Spec: enterpriseApi.Spec{Image: "splunk/splunk:old"},
+ },
+ },
+ }
+ cm.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("ClusterManager"))
+ if err := client.Create(ctx, &cm); err != nil {
+ t.Fatalf("Failed to create ClusterManager: %v", err)
+ }
+ cm.Status.Phase = enterpriseApi.PhaseReady
+ if err := client.Status().Update(ctx, &cm); err != nil {
+ t.Fatalf("Failed to update ClusterManager status: %v", err)
+ }
+
+ // Create CM statefulset with old image
+ cmSS := &appsv1.StatefulSet{
+ ObjectMeta: metav1.ObjectMeta{Name: "splunk-test-cm-cluster-manager", Namespace: "test"},
+ Spec: appsv1.StatefulSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "test"}},
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "test"}},
+ Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "splunk", Image: "splunk/splunk:old"}}},
+ },
+ },
+ }
+ if err := client.Create(ctx, cmSS); err != nil {
+ t.Fatalf("Failed to create CM StatefulSet: %v", err)
+ }
+
+ // IndexerCluster CR with NEW image (mismatch with CM)
+ idx := enterpriseApi.IndexerCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-idx", Namespace: "test"},
+ Spec: enterpriseApi.IndexerClusterSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ Spec: enterpriseApi.Spec{Image: "splunk/splunk:new"},
+ ClusterManagerRef: corev1.ObjectReference{Name: "test-cm"},
+ },
+ },
+ }
+ idx.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("IndexerCluster"))
+
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ mgr := &indexerClusterPodManager{}
+ continueReconcile, err := UpgradePathValidation(ctx, client, &idx, idx.Spec.CommonSplunkSpec, mgr)
+
+ if continueReconcile {
+ t.Errorf("Expected continueReconcile to be false when CM image mismatches IDX image")
+ }
+ if err == nil {
+ t.Errorf("Expected error when CM image mismatches IDX image")
+ }
+
+ found := false
+ for _, event := range recorder.events {
+ if event.reason == "UpgradeBlockedVersionMismatch" {
+ found = true
+ if event.eventType != corev1.EventTypeWarning {
+ t.Errorf("Expected Warning event type for UpgradeBlockedVersionMismatch, got %s", event.eventType)
+ }
+ expectedMessage := "Upgrade blocked: ClusterManager version splunk/splunk:old != IndexerCluster version splunk/splunk:new. Upgrade ClusterManager first."
+ if event.message != expectedMessage {
+ t.Errorf("Expected event message %q, got: %q", expectedMessage, event.message)
+ }
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected UpgradeBlockedVersionMismatch event to be published")
+ }
+}
+
func createPods(t *testing.T, ctx context.Context, client common.ControllerClient, crtype, name, namespace, image string) {
stpod := &corev1.Pod{}
namespacesName := types.NamespacedName{
diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go
index f616074b0..3b525f94a 100644
--- a/pkg/splunk/enterprise/util.go
+++ b/pkg/splunk/enterprise/util.go
@@ -34,6 +34,9 @@ import (
"testing"
"time"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ "github.com/aws/aws-sdk-go-v2/service/sqs"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
appsv1 "k8s.io/api/apps/v1"
@@ -114,7 +117,7 @@ func initStorageTracker() error {
// updateStorageTracker updates the storage tracker with the latest disk info
func updateStorageTracker(ctx context.Context) error {
- if !isPersistantVolConfigured() {
+ if !isPersistentVolConfigured() {
return fmt.Errorf("operator resource tracker not initialized")
}
@@ -139,6 +142,9 @@ func GetRemoteStorageClient(ctx context.Context, client splcommon.ControllerClie
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("GetRemoteStorageClient").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
+ // Get event publisher from context
+ eventPublisher := GetEventPublisher(ctx, cr)
+
remoteDataClient := splclient.SplunkRemoteDataClient{}
//use the provider name to get the corresponding function pointer
getClientWrapper := splclient.RemoteDataClientsMap[vol.Provider]
@@ -156,6 +162,14 @@ func GetRemoteStorageClient(ctx context.Context, client splcommon.ControllerClie
// Get credentials through the secretRef
remoteDataClientSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), appSecretRef)
if err != nil {
+ // Emit event for missing secret
+ if k8serrors.IsNotFound(err) {
+ if eventPublisher != nil {
+ eventPublisher.Warning(ctx, "SecretMissing",
+ fmt.Sprintf("Required secret '%s' not found in namespace '%s'. Create secret to proceed.", appSecretRef, cr.GetNamespace()))
+ }
+ }
+
return remoteDataClient, err
}
@@ -205,6 +219,11 @@ func GetRemoteStorageClient(ctx context.Context, client splcommon.ControllerClie
if err != nil {
scopedLog.Error(err, "Failed to get the S3 client")
+ // Emit event when operator cannot connect to the remote app repository
+ if eventPublisher != nil {
+ eventPublisher.Warning(ctx, "AppRepositoryConnectionFailed",
+ fmt.Sprintf("Failed to connect to app repository '%s': %s. Check credentials and network.", vol.Name, err.Error()))
+ }
return remoteDataClient, err
}
@@ -398,8 +417,18 @@ func getSearchHeadExtraEnv(cr splcommon.MetaObject, replicas int32) []corev1.Env
// GetSmartstoreRemoteVolumeSecrets is used to retrieve S3 access key and secrete keys.
func GetSmartstoreRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject, smartstore *enterpriseApi.SmartStoreSpec) (string, string, string, error) {
+ // Get event publisher from context
+ eventPublisher := GetEventPublisher(ctx, cr)
+
namespaceScopedSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), volume.SecretRef)
if err != nil {
+ // Emit event for missing secret
+ if k8serrors.IsNotFound(err) {
+ if eventPublisher != nil {
+ eventPublisher.Warning(ctx, "SecretMissing",
+ fmt.Sprintf("Required secret '%s' not found in namespace '%s'. Create secret to proceed.", volume.SecretRef, cr.GetNamespace()))
+ }
+ }
return "", "", "", err
}
@@ -409,14 +438,43 @@ func GetSmartstoreRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.
splutil.SetSecretOwnerRef(ctx, client, volume.SecretRef, cr)
if accessKey == "" {
+ if eventPublisher != nil {
+ eventPublisher.Warning(ctx, "SecretInvalid",
+ fmt.Sprintf("Secret '%s' missing required fields: %s. Update secret with required data.", namespaceScopedSecret.GetName(), "accessKey"))
+ }
return "", "", "", fmt.Errorf("s3 Access Key is missing")
} else if secretKey == "" {
+ if eventPublisher != nil {
+ eventPublisher.Warning(ctx, "SecretInvalid",
+ fmt.Sprintf("Secret '%s' missing required fields: %s. Update secret with required data.", namespaceScopedSecret.GetName(), "s3SecretKey"))
+ }
return "", "", "", fmt.Errorf("s3 Secret Key is missing")
}
return accessKey, secretKey, namespaceScopedSecret.ResourceVersion, nil
}
+// GetQueueRemoteVolumeSecrets is used to retrieve the access key and secret key
+// for Index & Ingestion separation from the Kubernetes secret referenced by the
+// given volume spec.
+//
+// It returns (accessKey, secretKey, secretResourceVersion, error). The
+// ResourceVersion lets callers detect when the credentials have been rotated.
+// An error is returned when the secret cannot be fetched or when either key is
+// missing from the secret data.
+func GetQueueRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject) (string, string, string, error) {
+ namespaceScopedSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), volume.SecretRef)
+ if err != nil {
+ return "", "", "", err
+ }
+
+ // Keys are read using the same s3AccessKey/s3SecretKey data-field names as
+ // the SmartStore secret handling; a missing field yields an empty string.
+ accessKey := string(namespaceScopedSecret.Data[s3AccessKey])
+ secretKey := string(namespaceScopedSecret.Data[s3SecretKey])
+
+ version := namespaceScopedSecret.ResourceVersion
+
+ if accessKey == "" {
+ return "", "", "", errors.New("access Key is missing")
+ } else if secretKey == "" {
+ return "", "", "", errors.New("secret Key is missing")
+ }
+
+ return accessKey, secretKey, version, nil
+}
+
// getLocalAppFileName generates the local app file name
// For e.g., if the app package name is sample_app.tgz
// and etag is "abcd1234", then it will be downloaded locally as sample_app.tgz_abcd1234
@@ -1123,7 +1181,7 @@ func removeStaleEntriesFromAuxPhaseInfo(ctx context.Context, desiredReplicas int
}
// changeAppSrcDeployInfoStatus sets the new status to all the apps in an AppSrc if the given repo state and deploy status matches
-// primarly used in Phase-3
+// primarily used in Phase-3
func changeAppSrcDeployInfoStatus(ctx context.Context, appSrc string, appSrcDeployStatus map[string]enterpriseApi.AppSrcDeployInfo, repoState enterpriseApi.AppRepoState, oldDeployStatus enterpriseApi.AppDeploymentStatus, newDeployStatus enterpriseApi.AppDeploymentStatus) {
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("changeAppSrcDeployInfoStatus").WithValues("Called for AppSource: ", appSrc, "repoState", repoState, "oldDeployStatus", oldDeployStatus, "newDeployStatus", newDeployStatus)
@@ -1240,8 +1298,8 @@ func handleAppRepoChanges(ctx context.Context, client splcommon.ControllerClient
return appsModified, err
}
-// isAppExtentionValid checks if an app extention is supported or not
-func isAppExtentionValid(receivedKey string) bool {
+// isAppExtensionValid checks if an app extension is supported or not
+func isAppExtensionValid(receivedKey string) bool {
validExtensions := []string{".spl", ".tgz", ".tar.gz"}
for _, ext := range validExtensions {
@@ -1265,7 +1323,7 @@ func AddOrUpdateAppSrcDeploymentInfoList(ctx context.Context, appSrcDeploymentIn
for _, remoteObj := range remoteS3ObjList {
receivedKey := *remoteObj.Key
- if !isAppExtentionValid(receivedKey) {
+ if !isAppExtensionValid(receivedKey) {
scopedLog.Error(nil, "App name Parsing: Ignoring the key with invalid extension", "receivedKey", receivedKey)
continue
}
@@ -2010,14 +2068,14 @@ func setInstallStateForClusterScopedApps(ctx context.Context, appDeployContext *
}
}
-// isPersistantVolConfigured confirms if the Operator Pod is configured with storage
-func isPersistantVolConfigured() bool {
+// isPersistentVolConfigured confirms if the Operator Pod is configured with storage
+func isPersistentVolConfigured() bool {
return operatorResourceTracker != nil && operatorResourceTracker.storage != nil
}
// reserveStorage tries to reserve the amount of requested storage
func reserveStorage(allocSize uint64) error {
- if !isPersistantVolConfigured() {
+ if !isPersistentVolConfigured() {
return fmt.Errorf("storageTracker was not initialized")
}
@@ -2036,7 +2094,7 @@ func reserveStorage(allocSize uint64) error {
// releaseStorage releases the reserved storage
func releaseStorage(releaseSize uint64) error {
- if !isPersistantVolConfigured() {
+ if !isPersistentVolConfigured() {
return fmt.Errorf("storageTracker was not initialized")
}
@@ -2277,6 +2335,48 @@ func fetchCurrentCRWithStatusUpdate(ctx context.Context, client splcommon.Contro
origCR.(*enterpriseApi.Standalone).Status.DeepCopyInto(&latestStdlnCR.Status)
return latestStdlnCR, nil
+ case "IngestorCluster":
+ latestIngCR := &enterpriseApi.IngestorCluster{}
+ err = client.Get(ctx, namespacedName, latestIngCR)
+ if err != nil {
+ return nil, err
+ }
+
+ origCR.(*enterpriseApi.IngestorCluster).Status.Message = ""
+ if (crError != nil) && ((*crError) != nil) {
+ origCR.(*enterpriseApi.IngestorCluster).Status.Message = (*crError).Error()
+ }
+ origCR.(*enterpriseApi.IngestorCluster).Status.DeepCopyInto(&latestIngCR.Status)
+ return latestIngCR, nil
+
+ case "Queue":
+ latestQueueCR := &enterpriseApi.Queue{}
+ err = client.Get(ctx, namespacedName, latestQueueCR)
+ if err != nil {
+ return nil, err
+ }
+
+ origCR.(*enterpriseApi.Queue).Status.Message = ""
+ if (crError != nil) && ((*crError) != nil) {
+ origCR.(*enterpriseApi.Queue).Status.Message = (*crError).Error()
+ }
+ origCR.(*enterpriseApi.Queue).Status.DeepCopyInto(&latestQueueCR.Status)
+ return latestQueueCR, nil
+
+ case "ObjectStorage":
+ latestOsCR := &enterpriseApi.ObjectStorage{}
+ err = client.Get(ctx, namespacedName, latestOsCR)
+ if err != nil {
+ return nil, err
+ }
+
+ origCR.(*enterpriseApi.ObjectStorage).Status.Message = ""
+ if (crError != nil) && ((*crError) != nil) {
+ origCR.(*enterpriseApi.ObjectStorage).Status.Message = (*crError).Error()
+ }
+ origCR.(*enterpriseApi.ObjectStorage).Status.DeepCopyInto(&latestOsCR.Status)
+ return latestOsCR, nil
+
case "LicenseMaster":
latestLmCR := &enterpriseApiV3.LicenseMaster{}
err = client.Get(ctx, namespacedName, latestLmCR)
@@ -2452,6 +2552,8 @@ func getApplicablePodNameForK8Probes(cr splcommon.MetaObject, ordinalIdx int32)
podType = "cluster-manager"
case "MonitoringConsole":
podType = "monitoring-console"
+ case "IngestorCluster":
+ podType = "ingestor"
}
return fmt.Sprintf("splunk-%s-%s-%d", cr.GetName(), podType, ordinalIdx)
}
@@ -2511,3 +2613,108 @@ func loadFixture(t *testing.T, filename string) string {
}
return compactJSON.String()
}
+
+// QueueOSConfig holds resolved Queue and ObjectStorage specs with credentials
+// extracted from the referenced Kubernetes secret.
+type QueueOSConfig struct {
+ // Queue is the resolved QueueSpec; its SQS endpoint may be filled in from
+ // the AWS SDK default resolver when not set explicitly.
+ Queue enterpriseApi.QueueSpec
+ // OS is the resolved ObjectStorageSpec; its S3 endpoint may likewise be
+ // filled in from the default resolver.
+ OS enterpriseApi.ObjectStorageSpec
+ // AccessKey and SecretKey come from the secret referenced by the queue's
+ // volume list; they stay empty when a service account is supplied instead.
+ AccessKey string
+ SecretKey string
+ // Version is the ResourceVersion of the secret the credentials came from.
+ Version string
+}
+
+// ResolveQueueAndObjectStorage fetches Queue and ObjectStorage CRs, resolves
+// their endpoints, and extracts credentials from the referenced secret.
+//
+// Either reference may be empty, in which case the corresponding spec is left
+// at its zero value. References default to the CR's namespace unless the
+// ObjectReference carries its own namespace. Credentials are only extracted
+// for SQS-style providers and only when no service account is supplied.
+func ResolveQueueAndObjectStorage(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, queueRef, osRef corev1.ObjectReference, serviceAccount string) (*QueueOSConfig, error) {
+ cfg := &QueueOSConfig{}
+
+ if queueRef.Name != "" {
+ ns := cr.GetNamespace()
+ if queueRef.Namespace != "" {
+ ns = queueRef.Namespace
+ }
+ var queue enterpriseApi.Queue
+ if err := c.Get(ctx, types.NamespacedName{Name: queueRef.Name, Namespace: ns}, &queue); err != nil {
+ return nil, err
+ }
+ cfg.Queue = queue.Spec
+ }
+ // Derive the SQS endpoint from the region only when no explicit endpoint
+ // was configured on the Queue spec.
+ if cfg.Queue.Provider == "sqs" || cfg.Queue.Provider == "sqs_cp" {
+ if cfg.Queue.SQS.Endpoint == "" && cfg.Queue.SQS.AuthRegion != "" {
+ ep, err := resolveSQSEndpoint(ctx, cfg.Queue.SQS.AuthRegion)
+ if err != nil {
+ return nil, err
+ }
+ cfg.Queue.SQS.Endpoint = ep
+ }
+ }
+
+ if osRef.Name != "" {
+ ns := cr.GetNamespace()
+ if osRef.Namespace != "" {
+ ns = osRef.Namespace
+ }
+ var os enterpriseApi.ObjectStorage
+ if err := c.Get(ctx, types.NamespacedName{Name: osRef.Name, Namespace: ns}, &os); err != nil {
+ return nil, err
+ }
+ cfg.OS = os.Spec
+ }
+ // NOTE(review): the S3 endpoint is derived from the *Queue's* SQS
+ // AuthRegion, not a region on the ObjectStorage spec — confirm ObjectStorage
+ // intentionally has no region of its own.
+ if cfg.OS.Provider == "s3" {
+ if cfg.OS.S3.Endpoint == "" && cfg.Queue.SQS.AuthRegion != "" {
+ ep, err := resolveS3Endpoint(ctx, cfg.Queue.SQS.AuthRegion)
+ if err != nil {
+ return nil, err
+ }
+ cfg.OS.S3.Endpoint = ep
+ }
+ }
+
+ // Extract credentials from the volume list's secret. With multiple volumes
+ // carrying a SecretRef, the last one wins — NOTE(review): confirm whether a
+ // break after the first match was intended.
+ if (cfg.Queue.Provider == "sqs" || cfg.Queue.Provider == "sqs_cp") && serviceAccount == "" {
+ for _, vol := range cfg.Queue.SQS.VolList {
+ if vol.SecretRef != "" {
+ accessKey, secretKey, version, err := GetQueueRemoteVolumeSecrets(ctx, vol, c, cr)
+ if err != nil {
+ return nil, err
+ }
+ cfg.AccessKey = accessKey
+ cfg.SecretKey = secretKey
+ cfg.Version = version
+ }
+ }
+ }
+
+ return cfg, nil
+}
+
+// resolveS3Endpoint returns the default S3 service endpoint URL for the given
+// AWS region, computed locally via the AWS SDK v2 EndpointResolverV2 (no
+// network call is made to resolve the endpoint).
+func resolveS3Endpoint(ctx context.Context, region string) (string, error) {
+ cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region))
+ if err != nil {
+ return "", err
+ }
+
+ client := s3.NewFromConfig(cfg)
+ // EndpointParameters.Region is *string per the SDK's resolver API.
+ params := s3.EndpointParameters{Region: &region}
+
+ ep, err := client.Options().EndpointResolverV2.ResolveEndpoint(ctx, params)
+ if err != nil {
+ return "", err
+ }
+ return ep.URI.String(), nil
+}
+
+// resolveSQSEndpoint returns the default SQS service endpoint URL for the
+// given AWS region, computed locally via the AWS SDK v2 EndpointResolverV2
+// (no network call is made to resolve the endpoint).
+func resolveSQSEndpoint(ctx context.Context, region string) (string, error) {
+ cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region))
+ if err != nil {
+ return "", err
+ }
+
+ client := sqs.NewFromConfig(cfg)
+ // EndpointParameters.Region is *string per the SDK's resolver API.
+ params := sqs.EndpointParameters{Region: &region}
+
+ ep, err := client.Options().EndpointResolverV2.ResolveEndpoint(ctx, params)
+ if err != nil {
+ return "", err
+ }
+ return ep.URI.String(), nil
+}
diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go
index e717e82da..b2ac6ba4d 100644
--- a/pkg/splunk/enterprise/util_test.go
+++ b/pkg/splunk/enterprise/util_test.go
@@ -1215,11 +1215,11 @@ func TestGetAvailableDiskSpaceShouldFail(t *testing.T) {
}
func TestIsAppExtentionValid(t *testing.T) {
- if !isAppExtentionValid("testapp.spl") || !isAppExtentionValid("testapp.tgz") || !isAppExtentionValid("testapp.tar.gz") {
+ if !isAppExtensionValid("testapp.spl") || !isAppExtensionValid("testapp.tgz") || !isAppExtensionValid("testapp.tar.gz") {
t.Errorf("failed to detect valid app extension")
}
- if isAppExtentionValid("testapp.aspl") || isAppExtentionValid("testapp.ttgz") {
+ if isAppExtensionValid("testapp.aspl") || isAppExtensionValid("testapp.ttgz") {
t.Errorf("failed to detect invalid app extension")
}
}
@@ -2130,19 +2130,19 @@ func TestUpdateStorageTracker(t *testing.T) {
func TestIsPersistantVolConfigured(t *testing.T) {
// when the resource tracker not initialized, should return false
operatorResourceTracker = nil
- if isPersistantVolConfigured() {
+ if isPersistentVolConfigured() {
t.Errorf("When the resource tracker is not initialized, should resturn false")
}
// when the storage tracker not initialized, should return false
operatorResourceTracker = &globalResourceTracker{}
- if isPersistantVolConfigured() {
+ if isPersistentVolConfigured() {
t.Errorf("When the storage tracker is not initialized, should return false")
}
// Should return true, when the trackers are initialized
operatorResourceTracker.storage = &storageTracker{}
- if !isPersistantVolConfigured() {
+ if !isPersistentVolConfigured() {
t.Errorf("When the storage tracker is initialized, should return true")
}
}
@@ -2612,6 +2612,8 @@ func TestUpdateReconcileRequeueTime(t *testing.T) {
}
func TestUpdateCRStatus(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
sch := pkgruntime.NewScheme()
utilruntime.Must(clientgoscheme.AddToScheme(sch))
utilruntime.Must(corev1.AddToScheme(sch))
@@ -2624,6 +2626,9 @@ func TestUpdateCRStatus(t *testing.T) {
WithStatusSubresource(&enterpriseApi.Standalone{}).
WithStatusSubresource(&enterpriseApi.MonitoringConsole{}).
WithStatusSubresource(&enterpriseApi.IndexerCluster{}).
+ WithStatusSubresource(&enterpriseApi.Queue{}).
+ WithStatusSubresource(&enterpriseApi.ObjectStorage{}).
+ WithStatusSubresource(&enterpriseApi.IngestorCluster{}).
WithStatusSubresource(&enterpriseApi.SearchHeadCluster{})
c := builder.Build()
ctx := context.TODO()
@@ -2687,7 +2692,8 @@ func TestFetchCurrentCRWithStatusUpdate(t *testing.T) {
WithStatusSubresource(&enterpriseApi.IndexerCluster{}).
WithStatusSubresource(&enterpriseApi.SearchHeadCluster{}).
WithStatusSubresource(&enterpriseApiV3.LicenseMaster{}).
- WithStatusSubresource(&enterpriseApiV3.ClusterMaster{})
+ WithStatusSubresource(&enterpriseApiV3.ClusterMaster{}).
+ WithStatusSubresource(&enterpriseApi.IngestorCluster{})
c := builder.Build()
ctx := context.TODO()
@@ -2923,6 +2929,43 @@ func TestFetchCurrentCRWithStatusUpdate(t *testing.T) {
} else if receivedCR.(*enterpriseApi.SearchHeadCluster).Status.Message != "testerror" {
t.Errorf("Failed to update error message")
}
+
+ // IngestorCluster: should return a valid CR
+ ic := enterpriseApi.IngestorCluster{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "IngestorCluster",
+ APIVersion: "enterprise.splunk.com/v4",
+ },
+
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "default",
+ },
+ Spec: enterpriseApi.IngestorClusterSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ Spec: enterpriseApi.Spec{
+ ImagePullPolicy: "Always",
+ },
+ Volumes: []corev1.Volume{},
+ },
+ },
+ Status: enterpriseApi.IngestorClusterStatus{
+ ReadyReplicas: 3,
+ },
+ }
+
+ // When the CR is available, should be able to fetch it.
+ err = c.Create(ctx, &ic)
+ if err != nil {
+ t.Errorf("ingestor CR creation failed.")
+ }
+
+ receivedCR, err = fetchCurrentCRWithStatusUpdate(ctx, c, &ic, nil)
+ if err != nil {
+ t.Errorf("Expected a valid CR without error, but got the error %v", err)
+ } else if receivedCR == nil || receivedCR.GroupVersionKind().Kind != "IngestorCluster" {
+ t.Errorf("Failed to fetch the CR")
+ }
}
// func getApplicablePodNameForK8Probes(t *testing.T) {
@@ -2984,6 +3027,13 @@ func TestGetApplicablePodNameForK8Probes(t *testing.T) {
if expectedPodName != returnedPodName {
t.Errorf("Unable to fetch correct pod name. Expected %s, returned %s", expectedPodName, returnedPodName)
}
+
+ cr.TypeMeta.Kind = "IngestorCluster"
+ expectedPodName = "splunk-stack1-ingestor-0"
+ returnedPodName = getApplicablePodNameForK8Probes(&cr, podID)
+ if expectedPodName != returnedPodName {
+ t.Errorf("Unable to fetch correct pod name. Expected %s, returned %s", expectedPodName, returnedPodName)
+ }
}
func TestCheckCmRemainingReferences(t *testing.T) {
@@ -3229,6 +3279,7 @@ func TestGetLicenseMasterURL(t *testing.T) {
}
}
func TestGetCurrentImage(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
ctx := context.TODO()
current := enterpriseApi.ClusterManager{
@@ -3258,10 +3309,13 @@ func TestGetCurrentImage(t *testing.T) {
WithStatusSubresource(&enterpriseApi.Standalone{}).
WithStatusSubresource(&enterpriseApi.MonitoringConsole{}).
WithStatusSubresource(&enterpriseApi.IndexerCluster{}).
- WithStatusSubresource(&enterpriseApi.SearchHeadCluster{})
+ WithStatusSubresource(&enterpriseApi.SearchHeadCluster{}).
+ WithStatusSubresource(&enterpriseApi.Queue{}).
+ WithStatusSubresource(&enterpriseApi.ObjectStorage{}).
+ WithStatusSubresource(&enterpriseApi.IngestorCluster{})
client := builder.Build()
client.Create(ctx, &current)
- _, err := ApplyClusterManager(ctx, client, &current)
+ _, err := ApplyClusterManager(ctx, client, &current, nil)
if err != nil {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
}
@@ -3278,3 +3332,657 @@ func TestGetCurrentImage(t *testing.T) {
}
}
+
+// TestSecretMissingEvent verifies that GetSmartstoreRemoteVolumeSecrets
+// publishes a Warning "SecretMissing" K8s event with the expected message when
+// the secret referenced by the volume spec does not exist.
+func TestSecretMissingEvent(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ sch := pkgruntime.NewScheme()
+ utilruntime.Must(clientgoscheme.AddToScheme(sch))
+ utilruntime.Must(corev1.AddToScheme(sch))
+ utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+ client := fake.NewClientBuilder().WithScheme(sch).Build()
+ ctx := context.TODO()
+
+ // Inject a mock event publisher through the context so emitted events can
+ // be inspected.
+ recorder := &mockEventRecorder{events: []mockEvent{}}
+ eventPublisher := &K8EventPublisher{recorder: recorder}
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ cr := &enterpriseApi.ClusterManager{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "test"},
+ }
+
+ volume := enterpriseApi.VolumeSpec{
+ Name: "test-vol",
+ SecretRef: "nonexistent-secret",
+ }
+
+ _, _, _, err := GetSmartstoreRemoteVolumeSecrets(ctx, volume, client, cr, &enterpriseApi.SmartStoreSpec{})
+ if err == nil {
+ t.Errorf("Expected error when secret does not exist")
+ }
+
+ found := false
+ for _, event := range recorder.events {
+ if event.reason == "SecretMissing" {
+ found = true
+ if event.eventType != corev1.EventTypeWarning {
+ t.Errorf("Expected Warning event type for SecretMissing, got %s", event.eventType)
+ }
+ expectedMessage := "Required secret 'nonexistent-secret' not found in namespace 'test'. Create secret to proceed."
+ if event.message != expectedMessage {
+ t.Errorf("Expected event message %q, got: %q", expectedMessage, event.message)
+ }
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected SecretMissing event to be published")
+ }
+}
+
+// TestSecretInvalidEmptyAccessKeyEvent verifies that a "SecretInvalid" Warning
+// event is published when the referenced secret exists but its s3_access_key
+// field is empty.
+func TestSecretInvalidEmptyAccessKeyEvent(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ sch := pkgruntime.NewScheme()
+ utilruntime.Must(clientgoscheme.AddToScheme(sch))
+ utilruntime.Must(corev1.AddToScheme(sch))
+ utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+ client := fake.NewClientBuilder().WithScheme(sch).Build()
+ ctx := context.TODO()
+
+ recorder := &mockEventRecorder{events: []mockEvent{}}
+ eventPublisher := &K8EventPublisher{recorder: recorder}
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ cr := &enterpriseApi.ClusterManager{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "test"},
+ }
+
+ // Secret present, but access key deliberately empty.
+ secret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: "test"},
+ Data: map[string][]byte{
+ "s3_access_key": {},
+ "s3_secret_key": []byte("some-secret-key"),
+ },
+ }
+ if err := client.Create(ctx, secret); err != nil {
+ t.Fatalf("Failed to create secret: %v", err)
+ }
+
+ volume := enterpriseApi.VolumeSpec{
+ Name: "test-vol",
+ SecretRef: "test-secret",
+ }
+
+ _, _, _, err := GetSmartstoreRemoteVolumeSecrets(ctx, volume, client, cr, &enterpriseApi.SmartStoreSpec{})
+ if err == nil {
+ t.Errorf("Expected error when access key is empty")
+ }
+
+ found := false
+ for _, event := range recorder.events {
+ if event.reason == "SecretInvalid" {
+ found = true
+ if event.eventType != corev1.EventTypeWarning {
+ t.Errorf("Expected Warning event type for SecretInvalid, got %s", event.eventType)
+ }
+ expectedMessage := "Secret 'test-secret' missing required fields: accessKey. Update secret with required data."
+ if event.message != expectedMessage {
+ t.Errorf("Expected event message %q, got: %q", expectedMessage, event.message)
+ }
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected SecretInvalid event to be published for empty access key")
+ }
+}
+
+// TestSecretInvalidEmptySecretKeyEvent verifies that a "SecretInvalid" Warning
+// event is published when the referenced secret exists but its s3_secret_key
+// field is empty.
+func TestSecretInvalidEmptySecretKeyEvent(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ sch := pkgruntime.NewScheme()
+ utilruntime.Must(clientgoscheme.AddToScheme(sch))
+ utilruntime.Must(corev1.AddToScheme(sch))
+ utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+ client := fake.NewClientBuilder().WithScheme(sch).Build()
+ ctx := context.TODO()
+
+ recorder := &mockEventRecorder{events: []mockEvent{}}
+ eventPublisher := &K8EventPublisher{recorder: recorder}
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ cr := &enterpriseApi.ClusterManager{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "test"},
+ }
+
+ // Secret present, but secret key deliberately empty.
+ secret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-secret-sk", Namespace: "test"},
+ Data: map[string][]byte{
+ "s3_access_key": []byte("some-access-key"),
+ "s3_secret_key": {},
+ },
+ }
+ if err := client.Create(ctx, secret); err != nil {
+ t.Fatalf("Failed to create secret: %v", err)
+ }
+
+ volume := enterpriseApi.VolumeSpec{
+ Name: "test-vol",
+ SecretRef: "test-secret-sk",
+ }
+
+ _, _, _, err := GetSmartstoreRemoteVolumeSecrets(ctx, volume, client, cr, &enterpriseApi.SmartStoreSpec{})
+ if err == nil {
+ t.Errorf("Expected error when secret key is empty")
+ }
+
+ found := false
+ for _, event := range recorder.events {
+ if event.reason == "SecretInvalid" {
+ found = true
+ if event.eventType != corev1.EventTypeWarning {
+ t.Errorf("Expected Warning event type for SecretInvalid, got %s", event.eventType)
+ }
+ expectedMessage := "Secret 'test-secret-sk' missing required fields: s3SecretKey. Update secret with required data."
+ if event.message != expectedMessage {
+ t.Errorf("Expected event message %q, got: %q", expectedMessage, event.message)
+ }
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected SecretInvalid event to be published for empty secret key")
+ }
+}
+
+// TestAppRepositoryConnectionFailedEvent verifies that GetRemoteStorageClient
+// publishes an "AppRepositoryConnectionFailed" Warning event when the remote
+// data client constructor fails. NOTE(review): it mutates the package-level
+// splclient.RemoteDataClientsMap, so it must not run in parallel with other
+// tests that touch that map.
+func TestAppRepositoryConnectionFailedEvent(t *testing.T) {
+ os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ sch := pkgruntime.NewScheme()
+ utilruntime.Must(clientgoscheme.AddToScheme(sch))
+ utilruntime.Must(corev1.AddToScheme(sch))
+ utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+ client := fake.NewClientBuilder().WithScheme(sch).Build()
+ ctx := context.TODO()
+
+ recorder := &mockEventRecorder{events: []mockEvent{}}
+ eventPublisher := &K8EventPublisher{recorder: recorder}
+ ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+ cr := &enterpriseApi.ClusterManager{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "test"},
+ }
+
+ // Create a secret with valid credentials so GetRemoteStorageClient reaches the getClient call
+ secret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-s3-secret", Namespace: "test"},
+ Data: map[string][]byte{
+ "s3_access_key": []byte("abc"),
+ "s3_secret_key": []byte("123"),
+ },
+ }
+ if err := client.Create(ctx, secret); err != nil {
+ t.Fatalf("Failed to create secret: %v", err)
+ }
+
+ // Register a mock provider that always returns an error from getClient
+ mockProvider := "mock-failing-provider"
+ splclient.RemoteDataClientsMap[mockProvider] = splclient.GetRemoteDataClientWrapper{
+ GetRemoteDataClient: func(ctx context.Context, bucket, accessKeyID, secretAccessKey, prefix, startAfter, region, endpoint string, fn splclient.GetInitFunc) (splclient.RemoteDataClient, error) {
+ return nil, fmt.Errorf("mock connection timeout")
+ },
+ GetInitFunc: func(ctx context.Context, region, accessKeyID, secretAccessKey string) interface{} {
+ return nil
+ },
+ }
+ defer delete(splclient.RemoteDataClientsMap, mockProvider)
+
+ vol := &enterpriseApi.VolumeSpec{
+ Name: "test-vol",
+ Provider: mockProvider,
+ Path: "test-bucket/apps",
+ SecretRef: "test-s3-secret",
+ }
+
+ // Call GetRemoteStorageClient — should fail at getClient and emit AppRepositoryConnectionFailed
+ _, err := GetRemoteStorageClient(ctx, client, cr, &enterpriseApi.AppFrameworkSpec{}, vol, "apps", nil)
+ if err == nil {
+ t.Errorf("Expected error from GetRemoteStorageClient when getClient fails")
+ }
+
+ found := false
+ for _, event := range recorder.events {
+ if event.reason == "AppRepositoryConnectionFailed" {
+ found = true
+ if event.eventType != corev1.EventTypeWarning {
+ t.Errorf("Expected Warning event type for AppRepositoryConnectionFailed, got %s", event.eventType)
+ }
+ expectedMessage := "Failed to connect to app repository 'test-vol': mock connection timeout. Check credentials and network."
+ if event.message != expectedMessage {
+ t.Errorf("Expected event message %q, got: %q", expectedMessage, event.message)
+ }
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected AppRepositoryConnectionFailed event to be published")
+ }
+}
+
+// TestResolveSQSEndpoint checks that resolveSQSEndpoint produces the expected
+// default SQS endpoint URL for a selection of commercial and China-partition
+// regions. Endpoint resolution is done locally by the AWS SDK resolver, so no
+// network access is needed.
+func TestResolveSQSEndpoint(t *testing.T) {
+ ctx := context.TODO()
+
+ tests := []struct {
+ name string
+ region string
+ wantEndpoint string
+ wantErrContain string
+ }{
+ {
+ name: "valid us-east-1 region",
+ region: "us-east-1",
+ wantEndpoint: "https://sqs.us-east-1.amazonaws.com",
+ },
+ {
+ name: "valid eu-west-1 region",
+ region: "eu-west-1",
+ wantEndpoint: "https://sqs.eu-west-1.amazonaws.com",
+ },
+ {
+ name: "valid ap-southeast-1 region",
+ region: "ap-southeast-1",
+ wantEndpoint: "https://sqs.ap-southeast-1.amazonaws.com",
+ },
+ {
+ name: "valid cn-north-1 region (China)",
+ region: "cn-north-1",
+ wantEndpoint: "https://sqs.cn-north-1.amazonaws.com.cn",
+ },
+ {
+ name: "valid cn-northwest-1 region (China Ningxia)",
+ region: "cn-northwest-1",
+ wantEndpoint: "https://sqs.cn-northwest-1.amazonaws.com.cn",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ endpoint, err := resolveSQSEndpoint(ctx, tt.region)
+ if tt.wantErrContain != "" {
+ if err == nil {
+ t.Errorf("resolveSQSEndpoint() expected error containing %q, got nil", tt.wantErrContain)
+ } else if !strings.Contains(err.Error(), tt.wantErrContain) {
+ t.Errorf("resolveSQSEndpoint() error = %v, want error containing %q", err, tt.wantErrContain)
+ }
+ return
+ }
+ if err != nil {
+ t.Errorf("resolveSQSEndpoint() unexpected error = %v", err)
+ return
+ }
+ if endpoint != tt.wantEndpoint {
+ t.Errorf("resolveSQSEndpoint() = %v, want %v", endpoint, tt.wantEndpoint)
+ }
+ })
+ }
+}
+
+// TestResolveS3Endpoint checks that resolveS3Endpoint produces the expected
+// default S3 endpoint URL for a selection of commercial and China-partition
+// regions. Endpoint resolution is done locally by the AWS SDK resolver, so no
+// network access is needed.
+func TestResolveS3Endpoint(t *testing.T) {
+ ctx := context.TODO()
+
+ tests := []struct {
+ name string
+ region string
+ wantEndpoint string
+ wantErrContain string
+ }{
+ {
+ name: "valid us-east-1 region",
+ region: "us-east-1",
+ wantEndpoint: "https://s3.us-east-1.amazonaws.com",
+ },
+ {
+ name: "valid eu-west-1 region",
+ region: "eu-west-1",
+ wantEndpoint: "https://s3.eu-west-1.amazonaws.com",
+ },
+ {
+ name: "valid ap-southeast-1 region",
+ region: "ap-southeast-1",
+ wantEndpoint: "https://s3.ap-southeast-1.amazonaws.com",
+ },
+ {
+ name: "valid cn-north-1 region (China)",
+ region: "cn-north-1",
+ wantEndpoint: "https://s3.cn-north-1.amazonaws.com.cn",
+ },
+ {
+ name: "valid cn-northwest-1 region (China Ningxia)",
+ region: "cn-northwest-1",
+ wantEndpoint: "https://s3.cn-northwest-1.amazonaws.com.cn",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ endpoint, err := resolveS3Endpoint(ctx, tt.region)
+ if tt.wantErrContain != "" {
+ if err == nil {
+ t.Errorf("resolveS3Endpoint() expected error containing %q, got nil", tt.wantErrContain)
+ } else if !strings.Contains(err.Error(), tt.wantErrContain) {
+ t.Errorf("resolveS3Endpoint() error = %v, want error containing %q", err, tt.wantErrContain)
+ }
+ return
+ }
+ if err != nil {
+ t.Errorf("resolveS3Endpoint() unexpected error = %v", err)
+ return
+ }
+ if endpoint != tt.wantEndpoint {
+ t.Errorf("resolveS3Endpoint() = %v, want %v", endpoint, tt.wantEndpoint)
+ }
+ })
+ }
+}
+
+func TestResolveQueueAndObjectStorage(t *testing.T) {
+ ctx := context.TODO()
+
+ sch := pkgruntime.NewScheme()
+ utilruntime.Must(clientgoscheme.AddToScheme(sch))
+ utilruntime.Must(corev1.AddToScheme(sch))
+ utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+ t.Run("empty refs returns empty config", func(t *testing.T) {
+ client := fake.NewClientBuilder().WithScheme(sch).Build()
+ cr := &enterpriseApi.IndexerCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-idxc", Namespace: "test"},
+ }
+
+ cfg, err := ResolveQueueAndObjectStorage(ctx, client, cr, corev1.ObjectReference{}, corev1.ObjectReference{}, "")
+ if err != nil {
+ t.Errorf("ResolveQueueAndObjectStorage() unexpected error = %v", err)
+ }
+ if cfg == nil {
+ t.Fatal("ResolveQueueAndObjectStorage() returned nil config")
+ }
+ if cfg.Queue.Provider != "" {
+ t.Errorf("Expected empty Queue.Provider, got %q", cfg.Queue.Provider)
+ }
+ if cfg.OS.Provider != "" {
+ t.Errorf("Expected empty OS.Provider, got %q", cfg.OS.Provider)
+ }
+ })
+
+ t.Run("queue ref not found returns error", func(t *testing.T) {
+ client := fake.NewClientBuilder().WithScheme(sch).Build()
+ cr := &enterpriseApi.IndexerCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-idxc", Namespace: "test"},
+ }
+
+ queueRef := corev1.ObjectReference{Name: "nonexistent-queue"}
+ _, err := ResolveQueueAndObjectStorage(ctx, client, cr, queueRef, corev1.ObjectReference{}, "")
+ if err == nil {
+ t.Error("ResolveQueueAndObjectStorage() expected error for nonexistent queue, got nil")
+ }
+ })
+
+ t.Run("objectstorage ref not found returns error", func(t *testing.T) {
+ client := fake.NewClientBuilder().WithScheme(sch).Build()
+ cr := &enterpriseApi.IndexerCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-idxc", Namespace: "test"},
+ }
+
+ osRef := corev1.ObjectReference{Name: "nonexistent-os"}
+ _, err := ResolveQueueAndObjectStorage(ctx, client, cr, corev1.ObjectReference{}, osRef, "")
+ if err == nil {
+ t.Error("ResolveQueueAndObjectStorage() expected error for nonexistent objectstorage, got nil")
+ }
+ })
+
+ t.Run("valid queue ref returns queue spec", func(t *testing.T) {
+ queue := &enterpriseApi.Queue{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-queue", Namespace: "test"},
+ Spec: enterpriseApi.QueueSpec{
+ Provider: "sqs",
+ SQS: enterpriseApi.SQSSpec{
+ Name: "my-queue",
+ AuthRegion: "",
+ DLQ: "my-dlq",
+ Endpoint: "https://sqs.us-east-1.amazonaws.com",
+ },
+ },
+ }
+ client := fake.NewClientBuilder().WithScheme(sch).WithObjects(queue).Build()
+ cr := &enterpriseApi.IndexerCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-idxc", Namespace: "test"},
+ }
+
+ queueRef := corev1.ObjectReference{Name: "test-queue"}
+ cfg, err := ResolveQueueAndObjectStorage(ctx, client, cr, queueRef, corev1.ObjectReference{}, "")
+ if err != nil {
+ t.Errorf("ResolveQueueAndObjectStorage() unexpected error = %v", err)
+ }
+ if cfg.Queue.Provider != "sqs" {
+ t.Errorf("Expected Queue.Provider = 'sqs', got %q", cfg.Queue.Provider)
+ }
+ if cfg.Queue.SQS.Name != "my-queue" {
+ t.Errorf("Expected Queue.SQS.Name = 'my-queue', got %q", cfg.Queue.SQS.Name)
+ }
+ if cfg.Queue.SQS.Endpoint != "https://sqs.us-east-1.amazonaws.com" {
+ t.Errorf("Expected Queue.SQS.Endpoint = 'https://sqs.us-east-1.amazonaws.com', got %q", cfg.Queue.SQS.Endpoint)
+ }
+ })
+
+ t.Run("valid objectstorage ref returns os spec", func(t *testing.T) {
+ os := &enterpriseApi.ObjectStorage{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-os", Namespace: "test"},
+ Spec: enterpriseApi.ObjectStorageSpec{
+ Provider: "s3",
+ S3: enterpriseApi.S3Spec{
+ Endpoint: "https://s3.us-east-1.amazonaws.com",
+ Path: "my-bucket/prefix",
+ },
+ },
+ }
+ client := fake.NewClientBuilder().WithScheme(sch).WithObjects(os).Build()
+ cr := &enterpriseApi.IndexerCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-idxc", Namespace: "test"},
+ }
+
+ osRef := corev1.ObjectReference{Name: "test-os"}
+ cfg, err := ResolveQueueAndObjectStorage(ctx, client, cr, corev1.ObjectReference{}, osRef, "")
+ if err != nil {
+ t.Errorf("ResolveQueueAndObjectStorage() unexpected error = %v", err)
+ }
+ if cfg.OS.Provider != "s3" {
+ t.Errorf("Expected OS.Provider = 's3', got %q", cfg.OS.Provider)
+ }
+ if cfg.OS.S3.Path != "my-bucket/prefix" {
+ t.Errorf("Expected OS.S3.Path = 'my-bucket/prefix', got %q", cfg.OS.S3.Path)
+ }
+ })
+
+ t.Run("queue ref with different namespace", func(t *testing.T) {
+ queue := &enterpriseApi.Queue{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-queue", Namespace: "other-ns"},
+ Spec: enterpriseApi.QueueSpec{
+ Provider: "sqs",
+ SQS: enterpriseApi.SQSSpec{
+ Name: "cross-ns-queue",
+ DLQ: "my-dlq",
+ Endpoint: "https://sqs.eu-west-1.amazonaws.com",
+ },
+ },
+ }
+ client := fake.NewClientBuilder().WithScheme(sch).WithObjects(queue).Build()
+ cr := &enterpriseApi.IndexerCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-idxc", Namespace: "test"},
+ }
+
+ queueRef := corev1.ObjectReference{Name: "test-queue", Namespace: "other-ns"}
+ cfg, err := ResolveQueueAndObjectStorage(ctx, client, cr, queueRef, corev1.ObjectReference{}, "")
+ if err != nil {
+ t.Errorf("ResolveQueueAndObjectStorage() unexpected error = %v", err)
+ }
+ if cfg.Queue.SQS.Name != "cross-ns-queue" {
+ t.Errorf("Expected Queue.SQS.Name = 'cross-ns-queue', got %q", cfg.Queue.SQS.Name)
+ }
+ })
+
+ t.Run("queue with secret ref extracts credentials", func(t *testing.T) {
+ secret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "aws-creds", Namespace: "test"},
+ Data: map[string][]byte{
+ "s3_access_key": []byte("abc"),
+ "s3_secret_key": []byte("123"),
+ },
+ }
+ queue := &enterpriseApi.Queue{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-queue", Namespace: "test"},
+ Spec: enterpriseApi.QueueSpec{
+ Provider: "sqs",
+ SQS: enterpriseApi.SQSSpec{
+ Name: "my-queue",
+ DLQ: "my-dlq",
+ Endpoint: "https://sqs.us-east-1.amazonaws.com",
+ VolList: []enterpriseApi.VolumeSpec{
+ {
+ Name: "vol1",
+ SecretRef: "aws-creds",
+ },
+ },
+ },
+ },
+ }
+ client := fake.NewClientBuilder().WithScheme(sch).WithObjects(queue, secret).Build()
+ cr := &enterpriseApi.IndexerCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-idxc", Namespace: "test"},
+ }
+
+ queueRef := corev1.ObjectReference{Name: "test-queue"}
+ cfg, err := ResolveQueueAndObjectStorage(ctx, client, cr, queueRef, corev1.ObjectReference{}, "")
+ if err != nil {
+ t.Errorf("ResolveQueueAndObjectStorage() unexpected error = %v", err)
+ }
+ if cfg.AccessKey != "abc" {
+ t.Errorf("Expected AccessKey = 'abc', got %q", cfg.AccessKey)
+ }
+ if cfg.SecretKey != "123" {
+ t.Errorf("Expected SecretKey = '123', got %q", cfg.SecretKey)
+ }
+ })
+
+ t.Run("queue with serviceAccount skips secret extraction", func(t *testing.T) {
+ queue := &enterpriseApi.Queue{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-queue", Namespace: "test"},
+ Spec: enterpriseApi.QueueSpec{
+ Provider: "sqs",
+ SQS: enterpriseApi.SQSSpec{
+ Name: "my-queue",
+ DLQ: "my-dlq",
+ Endpoint: "https://sqs.us-east-1.amazonaws.com",
+ VolList: []enterpriseApi.VolumeSpec{
+ {
+ Name: "vol1",
+ SecretRef: "aws-creds",
+ },
+ },
+ },
+ },
+ }
+ client := fake.NewClientBuilder().WithScheme(sch).WithObjects(queue).Build()
+ cr := &enterpriseApi.IndexerCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-idxc", Namespace: "test"},
+ }
+
+ queueRef := corev1.ObjectReference{Name: "test-queue"}
+ cfg, err := ResolveQueueAndObjectStorage(ctx, client, cr, queueRef, corev1.ObjectReference{}, "my-service-account")
+ if err != nil {
+ t.Errorf("ResolveQueueAndObjectStorage() unexpected error = %v", err)
+ }
+ // When serviceAccount is provided, credentials should not be extracted
+ if cfg.AccessKey != "" {
+ t.Errorf("Expected empty AccessKey when serviceAccount is provided, got %q", cfg.AccessKey)
+ }
+ if cfg.SecretKey != "" {
+ t.Errorf("Expected empty SecretKey when serviceAccount is provided, got %q", cfg.SecretKey)
+ }
+ })
+
+ t.Run("queue with missing secret returns error", func(t *testing.T) {
+ queue := &enterpriseApi.Queue{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-queue", Namespace: "test"},
+ Spec: enterpriseApi.QueueSpec{
+ Provider: "sqs",
+ SQS: enterpriseApi.SQSSpec{
+ Name: "my-queue",
+ DLQ: "my-dlq",
+ Endpoint: "https://sqs.us-east-1.amazonaws.com",
+ VolList: []enterpriseApi.VolumeSpec{
+ {
+ Name: "vol1",
+ SecretRef: "nonexistent-secret",
+ },
+ },
+ },
+ },
+ }
+ client := fake.NewClientBuilder().WithScheme(sch).WithObjects(queue).Build()
+ cr := &enterpriseApi.IndexerCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-idxc", Namespace: "test"},
+ }
+
+ queueRef := corev1.ObjectReference{Name: "test-queue"}
+ _, err := ResolveQueueAndObjectStorage(ctx, client, cr, queueRef, corev1.ObjectReference{}, "")
+ if err == nil {
+ t.Error("ResolveQueueAndObjectStorage() expected error for missing secret, got nil")
+ }
+ })
+
+ t.Run("both queue and objectstorage refs", func(t *testing.T) {
+ queue := &enterpriseApi.Queue{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-queue", Namespace: "test"},
+ Spec: enterpriseApi.QueueSpec{
+ Provider: "sqs",
+ SQS: enterpriseApi.SQSSpec{
+ Name: "my-queue",
+ DLQ: "my-dlq",
+ Endpoint: "https://sqs.us-east-1.amazonaws.com",
+ },
+ },
+ }
+ os := &enterpriseApi.ObjectStorage{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-os", Namespace: "test"},
+ Spec: enterpriseApi.ObjectStorageSpec{
+ Provider: "s3",
+ S3: enterpriseApi.S3Spec{
+ Endpoint: "https://s3.us-east-1.amazonaws.com",
+ Path: "my-bucket",
+ },
+ },
+ }
+ client := fake.NewClientBuilder().WithScheme(sch).WithObjects(queue, os).Build()
+ cr := &enterpriseApi.IndexerCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-idxc", Namespace: "test"},
+ }
+
+ queueRef := corev1.ObjectReference{Name: "test-queue"}
+ osRef := corev1.ObjectReference{Name: "test-os"}
+ cfg, err := ResolveQueueAndObjectStorage(ctx, client, cr, queueRef, osRef, "")
+ if err != nil {
+ t.Errorf("ResolveQueueAndObjectStorage() unexpected error = %v", err)
+ }
+ if cfg.Queue.Provider != "sqs" {
+ t.Errorf("Expected Queue.Provider = 'sqs', got %q", cfg.Queue.Provider)
+ }
+ if cfg.OS.Provider != "s3" {
+ t.Errorf("Expected OS.Provider = 's3', got %q", cfg.OS.Provider)
+ }
+ })
+}
diff --git a/pkg/splunk/enterprise/validation/clustermanager_validation.go b/pkg/splunk/enterprise/validation/clustermanager_validation.go
new file mode 100644
index 000000000..af77e1e84
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/clustermanager_validation.go
@@ -0,0 +1,59 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "k8s.io/apimachinery/pkg/util/validation/field"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+)
+
+// ValidateClusterManagerCreate validates a ClusterManager on CREATE and returns every field error found; an empty list means the object is admissible.
+func ValidateClusterManagerCreate(obj *enterpriseApi.ClusterManager) field.ErrorList {
+ var allErrs field.ErrorList
+
+ // The shared CommonSplunkSpec (storage configs etc.) is always validated, rooted at path "spec".
+ allErrs = append(allErrs, validateCommonSplunkSpec(&obj.Spec.CommonSplunkSpec, field.NewPath("spec"))...)
+
+ // SmartStore is validated only when the user actually configured it (any volumes or indexes present); a zero-value spec is skipped.
+ if len(obj.Spec.SmartStore.VolList) > 0 || len(obj.Spec.SmartStore.IndexList) > 0 {
+ allErrs = append(allErrs, validateSmartStore(&obj.Spec.SmartStore, field.NewPath("spec").Child("smartstore"))...)
+ }
+
+ // AppFramework is likewise validated only when the user supplied volumes or app sources; errors are rooted at "spec.appRepo".
+ if len(obj.Spec.AppFrameworkConfig.VolList) > 0 || len(obj.Spec.AppFrameworkConfig.AppSources) > 0 {
+ allErrs = append(allErrs, validateAppFramework(&obj.Spec.AppFrameworkConfig, field.NewPath("spec").Child("appRepo"))...)
+ }
+
+ return allErrs
+}
+
+// ValidateClusterManagerUpdate validates a ClusterManager on UPDATE.
+// TODO: Add immutable field validation here (e.g., compare obj vs oldObj for fields that cannot change after creation); until then oldObj is intentionally unused and UPDATE simply re-runs the CREATE rules.
+func ValidateClusterManagerUpdate(obj, oldObj *enterpriseApi.ClusterManager) field.ErrorList {
+ return ValidateClusterManagerCreate(obj) // delegate: same rules as CREATE for now
+}
+
+// GetClusterManagerWarningsOnCreate returns non-fatal admission warnings for ClusterManager CREATE (currently only the shared common-spec warnings).
+func GetClusterManagerWarningsOnCreate(obj *enterpriseApi.ClusterManager) []string {
+ return getCommonWarnings(&obj.Spec.CommonSplunkSpec)
+}
+
+// GetClusterManagerWarningsOnUpdate returns warnings for ClusterManager UPDATE; oldObj is currently unused and the CREATE warnings are reused verbatim.
+func GetClusterManagerWarningsOnUpdate(obj, oldObj *enterpriseApi.ClusterManager) []string {
+ return GetClusterManagerWarningsOnCreate(obj)
+}
diff --git a/pkg/splunk/enterprise/validation/clustermanager_validation_test.go b/pkg/splunk/enterprise/validation/clustermanager_validation_test.go
new file mode 100644
index 000000000..84f23724a
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/clustermanager_validation_test.go
@@ -0,0 +1,200 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "testing"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestValidateClusterManagerCreate(t *testing.T) { // table-driven coverage of CREATE validation: valid specs, single-field failures, and one multi-error case
+ tests := []struct {
+ name string
+ obj *enterpriseApi.ClusterManager
+ wantErrCount int
+ wantErrField string // when set, must match the FIRST returned error's field path
+ }{
+ {
+ name: "valid cluster manager - minimal",
+ obj: &enterpriseApi.ClusterManager{},
+ wantErrCount: 0,
+ },
+ {
+ name: "valid cluster manager - with SmartStore",
+ obj: &enterpriseApi.ClusterManager{
+ Spec: enterpriseApi.ClusterManagerSpec{
+ SmartStore: enterpriseApi.SmartStoreSpec{
+ VolList: []enterpriseApi.VolumeSpec{
+ {Name: "vol1", Endpoint: "s3://bucket"},
+ },
+ IndexList: []enterpriseApi.IndexSpec{
+ {Name: "idx1", IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{VolName: "vol1"}},
+ },
+ },
+ },
+ },
+ wantErrCount: 0,
+ },
+ {
+ name: "invalid cluster manager - SmartStore volume without name",
+ obj: &enterpriseApi.ClusterManager{
+ Spec: enterpriseApi.ClusterManagerSpec{
+ SmartStore: enterpriseApi.SmartStoreSpec{
+ VolList: []enterpriseApi.VolumeSpec{
+ {Name: "", Endpoint: "s3://bucket"},
+ },
+ },
+ },
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.smartstore.volumes[0].name",
+ },
+ {
+ name: "invalid cluster manager - SmartStore volume without endpoint or path",
+ obj: &enterpriseApi.ClusterManager{
+ Spec: enterpriseApi.ClusterManagerSpec{
+ SmartStore: enterpriseApi.SmartStoreSpec{
+ VolList: []enterpriseApi.VolumeSpec{
+ {Name: "vol1", Endpoint: "", Path: ""},
+ },
+ },
+ },
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.smartstore.volumes[0]", // Required error is reported on the volume itself, not a child field
+ },
+ {
+ name: "valid cluster manager - with AppFramework",
+ obj: &enterpriseApi.ClusterManager{
+ Spec: enterpriseApi.ClusterManagerSpec{
+ AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{
+ VolList: []enterpriseApi.VolumeSpec{
+ {Name: "appvol", Endpoint: "s3://apps"},
+ },
+ AppSources: []enterpriseApi.AppSourceSpec{
+ {Name: "apps", Location: "/apps"},
+ },
+ },
+ },
+ },
+ wantErrCount: 0,
+ },
+ {
+ name: "invalid cluster manager - AppFramework source without name",
+ obj: &enterpriseApi.ClusterManager{
+ Spec: enterpriseApi.ClusterManagerSpec{
+ AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{
+ AppSources: []enterpriseApi.AppSourceSpec{
+ {Name: "", Location: "/apps"},
+ },
+ },
+ },
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.appRepo.appSources[0].name",
+ },
+ {
+ name: "invalid cluster manager - multiple errors",
+ obj: &enterpriseApi.ClusterManager{
+ Spec: enterpriseApi.ClusterManagerSpec{
+ SmartStore: enterpriseApi.SmartStoreSpec{
+ VolList: []enterpriseApi.VolumeSpec{
+ {Name: "", Endpoint: ""},
+ },
+ },
+ },
+ },
+ wantErrCount: 2, // missing name + missing endpoint/path
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ errs := ValidateClusterManagerCreate(tt.obj)
+ assert.Len(t, errs, tt.wantErrCount, "unexpected error count")
+ if tt.wantErrField != "" && len(errs) > 0 { // field path is only checked when the case expects a specific error
+ assert.Equal(t, tt.wantErrField, errs[0].Field, "unexpected error field")
+ }
+ })
+ }
+}
+
+func TestValidateClusterManagerUpdate(t *testing.T) { // UPDATE currently delegates to CREATE validation, so expectations mirror the CREATE table
+ tests := []struct {
+ name string
+ obj *enterpriseApi.ClusterManager
+ oldObj *enterpriseApi.ClusterManager // unused by the implementation today; kept for future immutability checks
+ wantErrCount int
+ }{
+ {
+ name: "valid update - no changes",
+ obj: &enterpriseApi.ClusterManager{},
+ oldObj: &enterpriseApi.ClusterManager{},
+ wantErrCount: 0,
+ },
+ {
+ name: "valid update - add SmartStore",
+ obj: &enterpriseApi.ClusterManager{
+ Spec: enterpriseApi.ClusterManagerSpec{
+ SmartStore: enterpriseApi.SmartStoreSpec{
+ VolList: []enterpriseApi.VolumeSpec{
+ {Name: "vol1", Endpoint: "s3://bucket"},
+ },
+ },
+ },
+ },
+ oldObj: &enterpriseApi.ClusterManager{},
+ wantErrCount: 0,
+ },
+ {
+ name: "invalid update - invalid SmartStore config",
+ obj: &enterpriseApi.ClusterManager{
+ Spec: enterpriseApi.ClusterManagerSpec{
+ SmartStore: enterpriseApi.SmartStoreSpec{
+ VolList: []enterpriseApi.VolumeSpec{
+ {Name: "", Endpoint: ""},
+ },
+ },
+ },
+ },
+ oldObj: &enterpriseApi.ClusterManager{},
+ wantErrCount: 2, // missing name + missing endpoint/path
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ errs := ValidateClusterManagerUpdate(tt.obj, tt.oldObj)
+ assert.Len(t, errs, tt.wantErrCount, "unexpected error count")
+ })
+ }
+}
+
+func TestGetClusterManagerWarningsOnCreate(t *testing.T) { // an empty spec must produce no admission warnings
+ obj := &enterpriseApi.ClusterManager{}
+ warnings := GetClusterManagerWarningsOnCreate(obj)
+ assert.Empty(t, warnings, "expected no warnings")
+}
+
+func TestGetClusterManagerWarningsOnUpdate(t *testing.T) { // UPDATE reuses the CREATE warning path, so an empty spec again yields none
+ obj := &enterpriseApi.ClusterManager{}
+ oldObj := &enterpriseApi.ClusterManager{}
+ warnings := GetClusterManagerWarningsOnUpdate(obj, oldObj)
+ assert.Empty(t, warnings, "expected no warnings")
+}
diff --git a/pkg/splunk/enterprise/validation/common_validation.go b/pkg/splunk/enterprise/validation/common_validation.go
new file mode 100644
index 000000000..be1e3c498
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/common_validation.go
@@ -0,0 +1,135 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "regexp"
+
+ "k8s.io/apimachinery/pkg/util/validation/field"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+)
+
+// storageCapacityRegex accepts only whole-gibibyte capacities such as "10Gi" or "100Gi"; other quantity suffixes (Mi, Ti, plain numbers) are rejected. Compiled once at package init via MustCompile.
+var storageCapacityRegex = regexp.MustCompile(`^[0-9]+Gi$`)
+
+// validateCommonSplunkSpec validates fields shared by all Splunk CRDs and returns the accumulated errors; fldPath is the base path (normally "spec") used to build error field paths.
+func validateCommonSplunkSpec(spec *enterpriseApi.CommonSplunkSpec, fldPath *field.Path) field.ErrorList {
+ var allErrs field.ErrorList
+
+ // Note: The following fields are validated via kubebuilder annotations in api/v4/common_types.go, so they are deliberately NOT re-checked here:
+ // - ImagePullPolicy: +kubebuilder:validation:Enum=Always;Never;IfNotPresent
+ // - LivenessInitialDelaySeconds: +kubebuilder:validation:Minimum=0
+ // - ReadinessInitialDelaySeconds: +kubebuilder:validation:Minimum=0
+
+ // Storage config backing /opt/splunk/etc.
+ allErrs = append(allErrs, validateStorageConfig(&spec.EtcVolumeStorageConfig, fldPath.Child("etcVolumeStorageConfig"))...)
+
+ // Storage config backing /opt/splunk/var.
+ allErrs = append(allErrs, validateStorageConfig(&spec.VarVolumeStorageConfig, fldPath.Child("varVolumeStorageConfig"))...)
+
+ return allErrs
+}
+
+// validateStorageConfig validates one StorageClassSpec; an entirely empty config is valid (defaults apply elsewhere).
+func validateStorageConfig(config *enterpriseApi.StorageClassSpec, fldPath *field.Path) field.ErrorList {
+ var allErrs field.ErrorList
+
+ // storageCapacity, when set, must be a whole-Gi quantity (e.g., "10Gi", "100Gi"); see storageCapacityRegex.
+ if config.StorageCapacity != "" {
+ if !storageCapacityRegex.MatchString(config.StorageCapacity) {
+ allErrs = append(allErrs, field.Invalid(
+ fldPath.Child("storageCapacity"),
+ config.StorageCapacity,
+ "must be in Gi format (e.g., '10Gi', '100Gi')"))
+ }
+ }
+
+ // Persistent storage (ephemeralStorage=false) with an explicit capacity also needs a storage class name; ephemeral storage never does.
+ if !config.EphemeralStorage && config.StorageCapacity != "" && config.StorageClassName == "" {
+ allErrs = append(allErrs, field.Required(
+ fldPath.Child("storageClassName"),
+ "storageClassName is required when using persistent storage"))
+ }
+
+ return allErrs
+}
+
+// validateSmartStore validates SmartStore configuration: every volume needs a name and at least one of endpoint/path; every index needs a name and a volume reference.
+func validateSmartStore(smartStore *enterpriseApi.SmartStoreSpec, fldPath *field.Path) field.ErrorList {
+ var allErrs field.ErrorList
+
+ // Volume definitions: both checks below can fire for the same entry, producing two errors.
+ for i, vol := range smartStore.VolList {
+ volPath := fldPath.Child("volumes").Index(i)
+ if vol.Name == "" {
+ allErrs = append(allErrs, field.Required(volPath.Child("name"), "volume name is required"))
+ }
+ if vol.Endpoint == "" && vol.Path == "" { // at least one locator is mandatory
+ allErrs = append(allErrs, field.Required(volPath, "either endpoint or path must be specified"))
+ }
+ }
+
+ // Index definitions. NOTE(review): VolName is not cross-checked against VolList names here — confirm whether dangling references should also be rejected.
+ for i, idx := range smartStore.IndexList {
+ idxPath := fldPath.Child("indexes").Index(i)
+ if idx.Name == "" {
+ allErrs = append(allErrs, field.Required(idxPath.Child("name"), "index name is required"))
+ }
+ if idx.VolName == "" {
+ allErrs = append(allErrs, field.Required(idxPath.Child("volumeName"), "volume name is required for index"))
+ }
+ }
+
+ return allErrs
+}
+
+// validateAppFramework validates App Framework configuration: every app source needs a name and location; every volume needs a name.
+func validateAppFramework(appConfig *enterpriseApi.AppFrameworkSpec, fldPath *field.Path) field.ErrorList {
+ var allErrs field.ErrorList
+
+ // App sources: name and location are both mandatory and checked independently.
+ for i, source := range appConfig.AppSources {
+ sourcePath := fldPath.Child("appSources").Index(i)
+ if source.Name == "" {
+ allErrs = append(allErrs, field.Required(sourcePath.Child("name"), "app source name is required"))
+ }
+ if source.Location == "" {
+ allErrs = append(allErrs, field.Required(sourcePath.Child("location"), "app source location is required"))
+ }
+ }
+
+ // Volume definitions. NOTE(review): unlike validateSmartStore, no endpoint/path requirement is enforced here — confirm the asymmetry is intentional.
+ for i, vol := range appConfig.VolList {
+ volPath := fldPath.Child("volumes").Index(i)
+ if vol.Name == "" {
+ allErrs = append(allErrs, field.Required(volPath.Child("name"), "volume name is required"))
+ }
+ }
+
+ return allErrs
+}
+
+// getCommonWarnings returns non-fatal admission warnings for the common Splunk spec; spec is accepted for future use.
+func getCommonWarnings(spec *enterpriseApi.CommonSplunkSpec) []string {
+ var warnings []string // nil slice: callers must treat nil and empty as "no warnings"
+
+ // Placeholder: no warning rules exist yet, so this currently always returns nil.
+ // Add warnings here as fields become deprecated or configurations become discouraged.
+
+ return warnings
+}
diff --git a/pkg/splunk/enterprise/validation/common_validation_test.go b/pkg/splunk/enterprise/validation/common_validation_test.go
new file mode 100644
index 000000000..22d48d169
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/common_validation_test.go
@@ -0,0 +1,379 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "testing"
+
+ "k8s.io/apimachinery/pkg/util/validation/field"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+)
+
+func TestValidateCommonSplunkSpec(t *testing.T) { // currently only asserts the empty spec is valid; storage-config failures are covered by TestValidateStorageConfig
+ // Note: The following fields are validated via kubebuilder annotations, not webhook:
+ // - ImagePullPolicy: +kubebuilder:validation:Enum
+ // - LivenessInitialDelaySeconds: +kubebuilder:validation:Minimum=0
+ // - ReadinessInitialDelaySeconds: +kubebuilder:validation:Minimum=0
+ tests := []struct {
+ name string
+ spec *enterpriseApi.CommonSplunkSpec
+ wantErrCount int
+ wantErrField string // optional: any returned error must carry this field path
+ }{
+ {
+ name: "valid spec - empty",
+ spec: &enterpriseApi.CommonSplunkSpec{},
+ wantErrCount: 0,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ errs := validateCommonSplunkSpec(tt.spec, field.NewPath("spec"))
+
+ if len(errs) != tt.wantErrCount {
+ t.Errorf("validateCommonSplunkSpec() got %d errors, want %d", len(errs), tt.wantErrCount)
+ for _, e := range errs { // dump the actual errors to ease debugging count mismatches
+ t.Logf(" error: %s", e.Error())
+ }
+ }
+
+ if tt.wantErrField != "" && len(errs) > 0 {
+ found := false
+ for _, e := range errs {
+ if e.Field == tt.wantErrField {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("validateCommonSplunkSpec() expected error on field %s", tt.wantErrField)
+ }
+ }
+ })
+ }
+}
+
+func TestValidateSmartStore(t *testing.T) { // exercises each volume/index rule in isolation plus one case where all four rules fire together
+ tests := []struct {
+ name string
+ smartStore *enterpriseApi.SmartStoreSpec
+ wantErrCount int
+ }{
+ {
+ name: "empty smart store",
+ smartStore: &enterpriseApi.SmartStoreSpec{},
+ wantErrCount: 0,
+ },
+ {
+ name: "valid smart store with volumes and indexes",
+ smartStore: &enterpriseApi.SmartStoreSpec{
+ VolList: []enterpriseApi.VolumeSpec{
+ {Name: "vol1", Endpoint: "s3://bucket"},
+ },
+ IndexList: []enterpriseApi.IndexSpec{
+ {
+ Name: "idx1",
+ IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{
+ VolName: "vol1",
+ },
+ },
+ },
+ },
+ wantErrCount: 0,
+ },
+ {
+ name: "volume without name",
+ smartStore: &enterpriseApi.SmartStoreSpec{
+ VolList: []enterpriseApi.VolumeSpec{
+ {Name: "", Endpoint: "s3://bucket"},
+ },
+ },
+ wantErrCount: 1,
+ },
+ {
+ name: "volume without endpoint or path",
+ smartStore: &enterpriseApi.SmartStoreSpec{
+ VolList: []enterpriseApi.VolumeSpec{
+ {Name: "vol1", Endpoint: "", Path: ""},
+ },
+ },
+ wantErrCount: 1,
+ },
+ {
+ name: "index without name",
+ smartStore: &enterpriseApi.SmartStoreSpec{
+ IndexList: []enterpriseApi.IndexSpec{
+ {
+ Name: "",
+ IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{
+ VolName: "vol1",
+ },
+ },
+ },
+ },
+ wantErrCount: 1,
+ },
+ {
+ name: "index without volume name",
+ smartStore: &enterpriseApi.SmartStoreSpec{
+ IndexList: []enterpriseApi.IndexSpec{
+ {
+ Name: "idx1",
+ IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{
+ VolName: "",
+ },
+ },
+ },
+ },
+ wantErrCount: 1,
+ },
+ {
+ name: "multiple validation errors",
+ smartStore: &enterpriseApi.SmartStoreSpec{
+ VolList: []enterpriseApi.VolumeSpec{
+ {Name: "", Endpoint: ""},
+ },
+ IndexList: []enterpriseApi.IndexSpec{
+ {
+ Name: "",
+ IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{
+ VolName: "",
+ },
+ },
+ },
+ },
+ wantErrCount: 4, // volume: missing name + missing endpoint/path; index: missing name + missing volName
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ errs := validateSmartStore(tt.smartStore, field.NewPath("spec").Child("smartstore"))
+
+ if len(errs) != tt.wantErrCount {
+ t.Errorf("validateSmartStore() got %d errors, want %d", len(errs), tt.wantErrCount)
+ for _, e := range errs { // dump the actual errors to ease debugging count mismatches
+ t.Logf(" error: %s", e.Error())
+ }
+ }
+ })
+ }
+}
+
+func TestValidateAppFramework(t *testing.T) { // exercises each app-source and volume rule of validateAppFramework in isolation
+ tests := []struct {
+ name string
+ appConfig *enterpriseApi.AppFrameworkSpec
+ wantErrCount int
+ }{
+ {
+ name: "empty app framework",
+ appConfig: &enterpriseApi.AppFrameworkSpec{},
+ wantErrCount: 0,
+ },
+ {
+ name: "valid app framework",
+ appConfig: &enterpriseApi.AppFrameworkSpec{
+ VolList: []enterpriseApi.VolumeSpec{
+ {Name: "vol1", Endpoint: "s3://bucket"},
+ },
+ AppSources: []enterpriseApi.AppSourceSpec{
+ {Name: "source1", Location: "/apps"},
+ },
+ },
+ wantErrCount: 0,
+ },
+ {
+ name: "app source without name",
+ appConfig: &enterpriseApi.AppFrameworkSpec{
+ AppSources: []enterpriseApi.AppSourceSpec{
+ {Name: "", Location: "/apps"},
+ },
+ },
+ wantErrCount: 1,
+ },
+ {
+ name: "app source without location",
+ appConfig: &enterpriseApi.AppFrameworkSpec{
+ AppSources: []enterpriseApi.AppSourceSpec{
+ {Name: "source1", Location: ""},
+ },
+ },
+ wantErrCount: 1,
+ },
+ {
+ name: "volume without name",
+ appConfig: &enterpriseApi.AppFrameworkSpec{
+ VolList: []enterpriseApi.VolumeSpec{
+ {Name: "", Endpoint: "s3://bucket"},
+ },
+ },
+ wantErrCount: 1,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ errs := validateAppFramework(tt.appConfig, field.NewPath("spec").Child("appFramework")) // NOTE(review): production callers use Child("appRepo"); the path only affects error prefixes, which this test does not assert
+
+ if len(errs) != tt.wantErrCount {
+ t.Errorf("validateAppFramework() got %d errors, want %d", len(errs), tt.wantErrCount)
+ for _, e := range errs { // dump the actual errors to ease debugging count mismatches
+ t.Logf(" error: %s", e.Error())
+ }
+ }
+ })
+ }
+}
+
+func TestValidateStorageConfig(t *testing.T) { // covers the Gi-format regex (valid/invalid suffixes) and the storageClassName requirement for persistent storage
+ tests := []struct {
+ name string
+ config *enterpriseApi.StorageClassSpec
+ wantErrCount int
+ wantErrField string // optional: at least one returned error must carry this field path
+ }{
+ {
+ name: "empty config - valid",
+ config: &enterpriseApi.StorageClassSpec{},
+ wantErrCount: 0,
+ },
+ {
+ name: "valid storage capacity - 10Gi",
+ config: &enterpriseApi.StorageClassSpec{
+ StorageCapacity: "10Gi",
+ StorageClassName: "standard",
+ },
+ wantErrCount: 0,
+ },
+ {
+ name: "valid storage capacity - 100Gi",
+ config: &enterpriseApi.StorageClassSpec{
+ StorageCapacity: "100Gi",
+ StorageClassName: "fast",
+ },
+ wantErrCount: 0,
+ },
+ {
+ name: "invalid storage capacity - missing Gi suffix",
+ config: &enterpriseApi.StorageClassSpec{
+ StorageCapacity: "10",
+ StorageClassName: "standard",
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.storageCapacity",
+ },
+ {
+ name: "invalid storage capacity - wrong suffix Mi",
+ config: &enterpriseApi.StorageClassSpec{
+ StorageCapacity: "10Mi",
+ StorageClassName: "standard",
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.storageCapacity",
+ },
+ {
+ name: "invalid storage capacity - text value",
+ config: &enterpriseApi.StorageClassSpec{
+ StorageCapacity: "large",
+ StorageClassName: "standard",
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.storageCapacity",
+ },
+ {
+ name: "missing storageClassName with persistent storage",
+ config: &enterpriseApi.StorageClassSpec{
+ StorageCapacity: "10Gi",
+ EphemeralStorage: false,
+ StorageClassName: "",
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.storageClassName",
+ },
+ {
+ name: "ephemeral storage - storageClassName not required",
+ config: &enterpriseApi.StorageClassSpec{
+ StorageCapacity: "10Gi",
+ EphemeralStorage: true,
+ StorageClassName: "",
+ },
+ wantErrCount: 0,
+ },
+ {
+ name: "multiple errors - invalid capacity and missing className",
+ config: &enterpriseApi.StorageClassSpec{
+ StorageCapacity: "10MB",
+ EphemeralStorage: false,
+ StorageClassName: "",
+ },
+ wantErrCount: 2, // bad Gi format + required storageClassName
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ errs := validateStorageConfig(tt.config, field.NewPath("spec"))
+
+ if len(errs) != tt.wantErrCount {
+ t.Errorf("validateStorageConfig() got %d errors, want %d", len(errs), tt.wantErrCount)
+ for _, e := range errs { // dump the actual errors to ease debugging count mismatches
+ t.Logf(" error: %s", e.Error())
+ }
+ }
+
+ if tt.wantErrField != "" && len(errs) > 0 {
+ found := false
+ for _, e := range errs {
+ if e.Field == tt.wantErrField {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("validateStorageConfig() expected error on field %s", tt.wantErrField)
+ }
+ }
+ })
+ }
+}
+
+func TestGetCommonWarnings(t *testing.T) { // getCommonWarnings is a placeholder today, so only the no-warning case exists; extend this table when warning rules land
+ tests := []struct {
+ name string
+ spec *enterpriseApi.CommonSplunkSpec
+ wantWarnings int
+ }{
+ {
+ name: "empty spec - no warnings",
+ spec: &enterpriseApi.CommonSplunkSpec{},
+ wantWarnings: 0,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ warnings := getCommonWarnings(tt.spec)
+
+ if len(warnings) != tt.wantWarnings {
+ t.Errorf("getCommonWarnings() got %d warnings, want %d", len(warnings), tt.wantWarnings)
+ }
+ })
+ }
+}
diff --git a/pkg/splunk/enterprise/validation/indexercluster_validation.go b/pkg/splunk/enterprise/validation/indexercluster_validation.go
new file mode 100644
index 000000000..3c342b05e
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/indexercluster_validation.go
@@ -0,0 +1,57 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "k8s.io/apimachinery/pkg/util/validation/field"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+)
+
+// ValidateIndexerClusterCreate validates an IndexerCluster on CREATE and returns every field error found; an empty list means the object is admissible.
+func ValidateIndexerClusterCreate(obj *enterpriseApi.IndexerCluster) field.ErrorList {
+ var allErrs field.ErrorList
+
+ // Replicas: a minimum of 3 is enforced. NOTE(review): a zero-value Replicas (field omitted) is also rejected here — confirm the CRD defaults replicas before this webhook runs.
+ if obj.Spec.Replicas < 3 {
+ allErrs = append(allErrs, field.Invalid(
+ field.NewPath("spec").Child("replicas"),
+ obj.Spec.Replicas,
+ "IndexerCluster requires at least 3 replicas"))
+ }
+
+ // Shared CommonSplunkSpec checks (storage configs etc.), rooted at path "spec".
+ allErrs = append(allErrs, validateCommonSplunkSpec(&obj.Spec.CommonSplunkSpec, field.NewPath("spec"))...)
+
+ return allErrs
+}
+
+// ValidateIndexerClusterUpdate validates an IndexerCluster on UPDATE.
+// TODO: Add immutable field validation here (e.g., compare obj vs oldObj for fields that cannot change after creation); until then oldObj is intentionally unused and UPDATE simply re-runs the CREATE rules.
+func ValidateIndexerClusterUpdate(obj, oldObj *enterpriseApi.IndexerCluster) field.ErrorList {
+ return ValidateIndexerClusterCreate(obj) // delegate: same rules as CREATE for now
+}
+
+// GetIndexerClusterWarningsOnCreate returns non-fatal admission warnings for IndexerCluster CREATE (currently only the shared common-spec warnings).
+func GetIndexerClusterWarningsOnCreate(obj *enterpriseApi.IndexerCluster) []string {
+ return getCommonWarnings(&obj.Spec.CommonSplunkSpec)
+}
+
+// GetIndexerClusterWarningsOnUpdate returns warnings for IndexerCluster UPDATE; oldObj is currently unused and the CREATE warnings are reused verbatim.
+func GetIndexerClusterWarningsOnUpdate(obj, oldObj *enterpriseApi.IndexerCluster) []string {
+ return GetIndexerClusterWarningsOnCreate(obj)
+}
diff --git a/pkg/splunk/enterprise/validation/indexercluster_validation_test.go b/pkg/splunk/enterprise/validation/indexercluster_validation_test.go
new file mode 100644
index 000000000..1a5a0b5cc
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/indexercluster_validation_test.go
@@ -0,0 +1,181 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "testing"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ "github.com/stretchr/testify/assert"
+)
+
+// TestValidateIndexerClusterCreate verifies the CREATE table: replicas >= 3
+// passes; 0, 2 and negative replica counts each yield exactly one error
+// attributed to spec.replicas.
+func TestValidateIndexerClusterCreate(t *testing.T) {
+ tests := []struct {
+ name string
+ obj *enterpriseApi.IndexerCluster
+ wantErrCount int
+ wantErrField string
+ }{
+ {
+ name: "valid indexer cluster - minimal",
+ obj: &enterpriseApi.IndexerCluster{
+ Spec: enterpriseApi.IndexerClusterSpec{
+ Replicas: 3,
+ },
+ },
+ wantErrCount: 0,
+ },
+ {
+ name: "invalid indexer cluster - zero replicas",
+ obj: &enterpriseApi.IndexerCluster{
+ Spec: enterpriseApi.IndexerClusterSpec{
+ Replicas: 0,
+ },
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.replicas",
+ },
+ {
+ name: "invalid indexer cluster - less than 3 replicas",
+ obj: &enterpriseApi.IndexerCluster{
+ Spec: enterpriseApi.IndexerClusterSpec{
+ Replicas: 2,
+ },
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.replicas",
+ },
+ {
+ name: "invalid indexer cluster - negative replicas",
+ obj: &enterpriseApi.IndexerCluster{
+ Spec: enterpriseApi.IndexerClusterSpec{
+ Replicas: -1,
+ },
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.replicas",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ errs := ValidateIndexerClusterCreate(tt.obj)
+ assert.Len(t, errs, tt.wantErrCount, "unexpected error count")
+ if tt.wantErrField != "" && len(errs) > 0 {
+ assert.Equal(t, tt.wantErrField, errs[0].Field, "unexpected error field")
+ }
+ })
+ }
+}
+
+// TestValidateIndexerClusterUpdate verifies UPDATE behaves like CREATE on the
+// new object: staying at or scaling above 3 replicas passes, while scaling
+// below the minimum (or negative) fails regardless of the old object.
+func TestValidateIndexerClusterUpdate(t *testing.T) {
+ tests := []struct {
+ name string
+ obj *enterpriseApi.IndexerCluster
+ oldObj *enterpriseApi.IndexerCluster
+ wantErrCount int
+ }{
+ {
+ name: "valid update - same replicas",
+ obj: &enterpriseApi.IndexerCluster{
+ Spec: enterpriseApi.IndexerClusterSpec{
+ Replicas: 3,
+ },
+ },
+ oldObj: &enterpriseApi.IndexerCluster{
+ Spec: enterpriseApi.IndexerClusterSpec{
+ Replicas: 3,
+ },
+ },
+ wantErrCount: 0,
+ },
+ {
+ name: "valid update - scale up",
+ obj: &enterpriseApi.IndexerCluster{
+ Spec: enterpriseApi.IndexerClusterSpec{
+ Replicas: 5,
+ },
+ },
+ oldObj: &enterpriseApi.IndexerCluster{
+ Spec: enterpriseApi.IndexerClusterSpec{
+ Replicas: 3,
+ },
+ },
+ wantErrCount: 0,
+ },
+ {
+ name: "invalid update - scale down below minimum",
+ obj: &enterpriseApi.IndexerCluster{
+ Spec: enterpriseApi.IndexerClusterSpec{
+ Replicas: 1,
+ },
+ },
+ oldObj: &enterpriseApi.IndexerCluster{
+ Spec: enterpriseApi.IndexerClusterSpec{
+ Replicas: 3,
+ },
+ },
+ wantErrCount: 1,
+ },
+ {
+ name: "invalid update - negative replicas",
+ obj: &enterpriseApi.IndexerCluster{
+ Spec: enterpriseApi.IndexerClusterSpec{
+ Replicas: -1,
+ },
+ },
+ oldObj: &enterpriseApi.IndexerCluster{
+ Spec: enterpriseApi.IndexerClusterSpec{
+ Replicas: 3,
+ },
+ },
+ wantErrCount: 1,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ errs := ValidateIndexerClusterUpdate(tt.obj, tt.oldObj)
+ assert.Len(t, errs, tt.wantErrCount, "unexpected error count")
+ })
+ }
+}
+
+// TestGetIndexerClusterWarningsOnCreate: a minimal valid object produces no
+// CREATE warnings.
+func TestGetIndexerClusterWarningsOnCreate(t *testing.T) {
+ obj := &enterpriseApi.IndexerCluster{
+ Spec: enterpriseApi.IndexerClusterSpec{
+ Replicas: 3,
+ },
+ }
+ warnings := GetIndexerClusterWarningsOnCreate(obj)
+ assert.Empty(t, warnings, "expected no warnings")
+}
+
+// TestGetIndexerClusterWarningsOnUpdate: an unchanged valid object produces
+// no UPDATE warnings.
+func TestGetIndexerClusterWarningsOnUpdate(t *testing.T) {
+ obj := &enterpriseApi.IndexerCluster{
+ Spec: enterpriseApi.IndexerClusterSpec{
+ Replicas: 3,
+ },
+ }
+ oldObj := &enterpriseApi.IndexerCluster{
+ Spec: enterpriseApi.IndexerClusterSpec{
+ Replicas: 3,
+ },
+ }
+ warnings := GetIndexerClusterWarningsOnUpdate(obj, oldObj)
+ assert.Empty(t, warnings, "expected no warnings")
+}
diff --git a/pkg/splunk/enterprise/validation/licensemanager_validation.go b/pkg/splunk/enterprise/validation/licensemanager_validation.go
new file mode 100644
index 000000000..01efae03e
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/licensemanager_validation.go
@@ -0,0 +1,49 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "k8s.io/apimachinery/pkg/util/validation/field"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+)
+
+// ValidateLicenseManagerCreate validates a LicenseManager on CREATE
+func ValidateLicenseManagerCreate(obj *enterpriseApi.LicenseManager) field.ErrorList {
+ var allErrs field.ErrorList
+
+ // Validate common spec
+ allErrs = append(allErrs, validateCommonSplunkSpec(&obj.Spec.CommonSplunkSpec, field.NewPath("spec"))...)
+
+ return allErrs
+}
+
+// ValidateLicenseManagerUpdate validates a LicenseManager on UPDATE.
+// Currently UPDATE re-runs the CREATE rules on the new object; oldObj is
+// accepted (and ignored) so immutable-field checks can be added later.
+// TODO: Add immutable field validation here (e.g., compare obj vs oldObj for fields that cannot change after creation)
+func ValidateLicenseManagerUpdate(obj, oldObj *enterpriseApi.LicenseManager) field.ErrorList {
+ return ValidateLicenseManagerCreate(obj)
+}
+
+// GetLicenseManagerWarningsOnCreate returns warnings for LicenseManager CREATE.
+// Delegates to getCommonWarnings; there are no LicenseManager-specific warnings.
+func GetLicenseManagerWarningsOnCreate(obj *enterpriseApi.LicenseManager) []string {
+ return getCommonWarnings(&obj.Spec.CommonSplunkSpec)
+}
+
+// GetLicenseManagerWarningsOnUpdate returns warnings for LicenseManager UPDATE.
+// UPDATE produces the same warnings as CREATE; oldObj is currently unused.
+func GetLicenseManagerWarningsOnUpdate(obj, oldObj *enterpriseApi.LicenseManager) []string {
+ return GetLicenseManagerWarningsOnCreate(obj)
+}
diff --git a/pkg/splunk/enterprise/validation/licensemanager_validation_test.go b/pkg/splunk/enterprise/validation/licensemanager_validation_test.go
new file mode 100644
index 000000000..6d111396a
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/licensemanager_validation_test.go
@@ -0,0 +1,114 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "testing"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ "github.com/stretchr/testify/assert"
+)
+
+// TestValidateLicenseManagerCreate verifies the CREATE table: an empty spec
+// and a well-formed etc-volume storage config pass, while a malformed
+// storage capacity ("10GB") is reported on
+// spec.etcVolumeStorageConfig.storageCapacity.
+func TestValidateLicenseManagerCreate(t *testing.T) {
+ tests := []struct {
+ name string
+ obj *enterpriseApi.LicenseManager
+ wantErrCount int
+ wantErrField string
+ }{
+ {
+ name: "valid license manager - minimal",
+ obj: &enterpriseApi.LicenseManager{},
+ wantErrCount: 0,
+ },
+ {
+ name: "valid license manager - with storage config",
+ obj: &enterpriseApi.LicenseManager{
+ Spec: enterpriseApi.LicenseManagerSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ EtcVolumeStorageConfig: enterpriseApi.StorageClassSpec{
+ StorageCapacity: "10Gi",
+ StorageClassName: "standard",
+ },
+ },
+ },
+ },
+ wantErrCount: 0,
+ },
+ {
+ name: "invalid license manager - invalid storage capacity format",
+ obj: &enterpriseApi.LicenseManager{
+ Spec: enterpriseApi.LicenseManagerSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ EtcVolumeStorageConfig: enterpriseApi.StorageClassSpec{
+ StorageCapacity: "10GB",
+ StorageClassName: "standard",
+ },
+ },
+ },
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.etcVolumeStorageConfig.storageCapacity",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ errs := ValidateLicenseManagerCreate(tt.obj)
+ assert.Len(t, errs, tt.wantErrCount, "unexpected error count")
+ if tt.wantErrField != "" && len(errs) > 0 {
+ assert.Equal(t, tt.wantErrField, errs[0].Field, "unexpected error field")
+ }
+ })
+ }
+}
+
+// TestValidateLicenseManagerUpdate: an unchanged empty spec passes UPDATE
+// validation with no errors.
+func TestValidateLicenseManagerUpdate(t *testing.T) {
+ tests := []struct {
+ name string
+ obj *enterpriseApi.LicenseManager
+ oldObj *enterpriseApi.LicenseManager
+ wantErrCount int
+ }{
+ {
+ name: "valid update - no changes",
+ obj: &enterpriseApi.LicenseManager{},
+ oldObj: &enterpriseApi.LicenseManager{},
+ wantErrCount: 0,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ errs := ValidateLicenseManagerUpdate(tt.obj, tt.oldObj)
+ assert.Len(t, errs, tt.wantErrCount, "unexpected error count")
+ })
+ }
+}
+
+// TestGetLicenseManagerWarningsOnCreate: an empty spec produces no CREATE warnings.
+func TestGetLicenseManagerWarningsOnCreate(t *testing.T) {
+ obj := &enterpriseApi.LicenseManager{}
+ warnings := GetLicenseManagerWarningsOnCreate(obj)
+ assert.Empty(t, warnings, "expected no warnings")
+}
+
+// TestGetLicenseManagerWarningsOnUpdate: an unchanged empty spec produces no
+// UPDATE warnings.
+func TestGetLicenseManagerWarningsOnUpdate(t *testing.T) {
+ obj := &enterpriseApi.LicenseManager{}
+ oldObj := &enterpriseApi.LicenseManager{}
+ warnings := GetLicenseManagerWarningsOnUpdate(obj, oldObj)
+ assert.Empty(t, warnings, "expected no warnings")
+}
diff --git a/pkg/splunk/enterprise/validation/monitoringconsole_validation.go b/pkg/splunk/enterprise/validation/monitoringconsole_validation.go
new file mode 100644
index 000000000..eeb46003f
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/monitoringconsole_validation.go
@@ -0,0 +1,49 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "k8s.io/apimachinery/pkg/util/validation/field"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+)
+
+// ValidateMonitoringConsoleCreate validates a MonitoringConsole on CREATE
+func ValidateMonitoringConsoleCreate(obj *enterpriseApi.MonitoringConsole) field.ErrorList {
+ var allErrs field.ErrorList
+
+ // Validate common spec
+ allErrs = append(allErrs, validateCommonSplunkSpec(&obj.Spec.CommonSplunkSpec, field.NewPath("spec"))...)
+
+ return allErrs
+}
+
+// ValidateMonitoringConsoleUpdate validates a MonitoringConsole on UPDATE.
+// Currently UPDATE re-runs the CREATE rules on the new object; oldObj is
+// accepted (and ignored) so immutable-field checks can be added later.
+// TODO: Add immutable field validation here (e.g., compare obj vs oldObj for fields that cannot change after creation)
+func ValidateMonitoringConsoleUpdate(obj, oldObj *enterpriseApi.MonitoringConsole) field.ErrorList {
+ return ValidateMonitoringConsoleCreate(obj)
+}
+
+// GetMonitoringConsoleWarningsOnCreate returns warnings for MonitoringConsole CREATE.
+// Delegates to getCommonWarnings; there are no MonitoringConsole-specific warnings.
+func GetMonitoringConsoleWarningsOnCreate(obj *enterpriseApi.MonitoringConsole) []string {
+ return getCommonWarnings(&obj.Spec.CommonSplunkSpec)
+}
+
+// GetMonitoringConsoleWarningsOnUpdate returns warnings for MonitoringConsole UPDATE.
+// UPDATE produces the same warnings as CREATE; oldObj is currently unused.
+func GetMonitoringConsoleWarningsOnUpdate(obj, oldObj *enterpriseApi.MonitoringConsole) []string {
+ return GetMonitoringConsoleWarningsOnCreate(obj)
+}
diff --git a/pkg/splunk/enterprise/validation/monitoringconsole_validation_test.go b/pkg/splunk/enterprise/validation/monitoringconsole_validation_test.go
new file mode 100644
index 000000000..6a0fd9122
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/monitoringconsole_validation_test.go
@@ -0,0 +1,129 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "testing"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ "github.com/stretchr/testify/assert"
+)
+
+// TestValidateMonitoringConsoleCreate verifies the CREATE table: empty and
+// well-formed var-volume configs pass; a malformed capacity ("100GB") and a
+// persistent (non-ephemeral) volume missing storageClassName each yield one
+// error on the corresponding spec.varVolumeStorageConfig field.
+func TestValidateMonitoringConsoleCreate(t *testing.T) {
+ tests := []struct {
+ name string
+ obj *enterpriseApi.MonitoringConsole
+ wantErrCount int
+ wantErrField string
+ }{
+ {
+ name: "valid monitoring console - minimal",
+ obj: &enterpriseApi.MonitoringConsole{},
+ wantErrCount: 0,
+ },
+ {
+ name: "valid monitoring console - with storage config",
+ obj: &enterpriseApi.MonitoringConsole{
+ Spec: enterpriseApi.MonitoringConsoleSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ VarVolumeStorageConfig: enterpriseApi.StorageClassSpec{
+ StorageCapacity: "100Gi",
+ StorageClassName: "standard",
+ },
+ },
+ },
+ },
+ wantErrCount: 0,
+ },
+ {
+ name: "invalid monitoring console - invalid storage capacity format",
+ obj: &enterpriseApi.MonitoringConsole{
+ Spec: enterpriseApi.MonitoringConsoleSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ VarVolumeStorageConfig: enterpriseApi.StorageClassSpec{
+ StorageCapacity: "100GB",
+ StorageClassName: "standard",
+ },
+ },
+ },
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.varVolumeStorageConfig.storageCapacity",
+ },
+ {
+ name: "invalid monitoring console - missing storageClassName for persistent storage",
+ obj: &enterpriseApi.MonitoringConsole{
+ Spec: enterpriseApi.MonitoringConsoleSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ VarVolumeStorageConfig: enterpriseApi.StorageClassSpec{
+ StorageCapacity: "100Gi",
+ EphemeralStorage: false,
+ },
+ },
+ },
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.varVolumeStorageConfig.storageClassName",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ errs := ValidateMonitoringConsoleCreate(tt.obj)
+ assert.Len(t, errs, tt.wantErrCount, "unexpected error count")
+ if tt.wantErrField != "" && len(errs) > 0 {
+ assert.Equal(t, tt.wantErrField, errs[0].Field, "unexpected error field")
+ }
+ })
+ }
+}
+
+// TestValidateMonitoringConsoleUpdate: an unchanged empty spec passes UPDATE
+// validation with no errors.
+func TestValidateMonitoringConsoleUpdate(t *testing.T) {
+ tests := []struct {
+ name string
+ obj *enterpriseApi.MonitoringConsole
+ oldObj *enterpriseApi.MonitoringConsole
+ wantErrCount int
+ }{
+ {
+ name: "valid update - no changes",
+ obj: &enterpriseApi.MonitoringConsole{},
+ oldObj: &enterpriseApi.MonitoringConsole{},
+ wantErrCount: 0,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ errs := ValidateMonitoringConsoleUpdate(tt.obj, tt.oldObj)
+ assert.Len(t, errs, tt.wantErrCount, "unexpected error count")
+ })
+ }
+}
+
+// TestGetMonitoringConsoleWarningsOnCreate: an empty spec produces no CREATE warnings.
+func TestGetMonitoringConsoleWarningsOnCreate(t *testing.T) {
+ obj := &enterpriseApi.MonitoringConsole{}
+ warnings := GetMonitoringConsoleWarningsOnCreate(obj)
+ assert.Empty(t, warnings, "expected no warnings")
+}
+
+// TestGetMonitoringConsoleWarningsOnUpdate: an unchanged empty spec produces
+// no UPDATE warnings.
+func TestGetMonitoringConsoleWarningsOnUpdate(t *testing.T) {
+ obj := &enterpriseApi.MonitoringConsole{}
+ oldObj := &enterpriseApi.MonitoringConsole{}
+ warnings := GetMonitoringConsoleWarningsOnUpdate(obj, oldObj)
+ assert.Empty(t, warnings, "expected no warnings")
+}
diff --git a/pkg/splunk/enterprise/validation/registry.go b/pkg/splunk/enterprise/validation/registry.go
new file mode 100644
index 000000000..298205b99
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/registry.go
@@ -0,0 +1,167 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+)
+
+// enterpriseGVR returns the GroupVersionResource for a resource served by
+// the enterprise.splunk.com/v4 API group. Centralizing the literal avoids
+// repeating (and possibly mistyping) the group/version in eight places.
+func enterpriseGVR(resource string) schema.GroupVersionResource {
+	return schema.GroupVersionResource{
+		Group:    "enterprise.splunk.com",
+		Version:  "v4",
+		Resource: resource,
+	}
+}
+
+// GVR constants for all Splunk Enterprise CRDs.
+// NOTE(review): the deprecated ClusterMaster/LicenseMaster resources are
+// registered at v4 like the rest — confirm those CRDs are actually served
+// at v4 and not only at an older API version.
+var (
+	// StandaloneGVR identifies the Standalone CRD.
+	StandaloneGVR = enterpriseGVR("standalones")
+
+	// IndexerClusterGVR identifies the IndexerCluster CRD.
+	IndexerClusterGVR = enterpriseGVR("indexerclusters")
+
+	// SearchHeadClusterGVR identifies the SearchHeadCluster CRD.
+	SearchHeadClusterGVR = enterpriseGVR("searchheadclusters")
+
+	// ClusterManagerGVR identifies the ClusterManager CRD.
+	ClusterManagerGVR = enterpriseGVR("clustermanagers")
+
+	// ClusterMasterGVR identifies the deprecated ClusterMaster CRD.
+	ClusterMasterGVR = enterpriseGVR("clustermasters")
+
+	// LicenseManagerGVR identifies the LicenseManager CRD.
+	LicenseManagerGVR = enterpriseGVR("licensemanagers")
+
+	// LicenseMasterGVR identifies the deprecated LicenseMaster CRD.
+	LicenseMasterGVR = enterpriseGVR("licensemasters")
+
+	// MonitoringConsoleGVR identifies the MonitoringConsole CRD.
+	MonitoringConsoleGVR = enterpriseGVR("monitoringconsoles")
+)
+
+// DefaultValidators is the registry of validators for all Splunk Enterprise CRDs.
+// Each entry pairs a GVR with a GenericValidator wiring that CRD's CREATE/UPDATE
+// validation and warning functions; GroupKind is used when reporting errors.
+var DefaultValidators = map[schema.GroupVersionResource]Validator{
+ StandaloneGVR: &GenericValidator[*enterpriseApi.Standalone]{
+ ValidateCreateFunc: ValidateStandaloneCreate,
+ ValidateUpdateFunc: ValidateStandaloneUpdate,
+ WarningsOnCreateFunc: GetStandaloneWarningsOnCreate,
+ WarningsOnUpdateFunc: GetStandaloneWarningsOnUpdate,
+ GroupKind: schema.GroupKind{
+ Group: "enterprise.splunk.com",
+ Kind: "Standalone",
+ },
+ },
+
+ IndexerClusterGVR: &GenericValidator[*enterpriseApi.IndexerCluster]{
+ ValidateCreateFunc: ValidateIndexerClusterCreate,
+ ValidateUpdateFunc: ValidateIndexerClusterUpdate,
+ WarningsOnCreateFunc: GetIndexerClusterWarningsOnCreate,
+ WarningsOnUpdateFunc: GetIndexerClusterWarningsOnUpdate,
+ GroupKind: schema.GroupKind{
+ Group: "enterprise.splunk.com",
+ Kind: "IndexerCluster",
+ },
+ },
+
+ SearchHeadClusterGVR: &GenericValidator[*enterpriseApi.SearchHeadCluster]{
+ ValidateCreateFunc: ValidateSearchHeadClusterCreate,
+ ValidateUpdateFunc: ValidateSearchHeadClusterUpdate,
+ WarningsOnCreateFunc: GetSearchHeadClusterWarningsOnCreate,
+ WarningsOnUpdateFunc: GetSearchHeadClusterWarningsOnUpdate,
+ GroupKind: schema.GroupKind{
+ Group: "enterprise.splunk.com",
+ Kind: "SearchHeadCluster",
+ },
+ },
+
+ ClusterManagerGVR: &GenericValidator[*enterpriseApi.ClusterManager]{
+ ValidateCreateFunc: ValidateClusterManagerCreate,
+ ValidateUpdateFunc: ValidateClusterManagerUpdate,
+ WarningsOnCreateFunc: GetClusterManagerWarningsOnCreate,
+ WarningsOnUpdateFunc: GetClusterManagerWarningsOnUpdate,
+ GroupKind: schema.GroupKind{
+ Group: "enterprise.splunk.com",
+ Kind: "ClusterManager",
+ },
+ },
+
+ // ClusterMaster is an alias for ClusterManager (deprecated)
+ // NOTE(review): this entry decodes incoming ClusterMaster objects into the
+ // v4 ClusterManager Go type and reports Kind "ClusterManager" in error
+ // messages — confirm both are intended for the deprecated CRD (errors will
+ // not mention the Kind the client actually submitted).
+ ClusterMasterGVR: &GenericValidator[*enterpriseApi.ClusterManager]{
+ ValidateCreateFunc: ValidateClusterManagerCreate,
+ ValidateUpdateFunc: ValidateClusterManagerUpdate,
+ WarningsOnCreateFunc: GetClusterManagerWarningsOnCreate,
+ WarningsOnUpdateFunc: GetClusterManagerWarningsOnUpdate,
+ GroupKind: schema.GroupKind{
+ Group: "enterprise.splunk.com",
+ Kind: "ClusterManager",
+ },
+ },
+
+ LicenseManagerGVR: &GenericValidator[*enterpriseApi.LicenseManager]{
+ ValidateCreateFunc: ValidateLicenseManagerCreate,
+ ValidateUpdateFunc: ValidateLicenseManagerUpdate,
+ WarningsOnCreateFunc: GetLicenseManagerWarningsOnCreate,
+ WarningsOnUpdateFunc: GetLicenseManagerWarningsOnUpdate,
+ GroupKind: schema.GroupKind{
+ Group: "enterprise.splunk.com",
+ Kind: "LicenseManager",
+ },
+ },
+
+ // LicenseMaster is an alias for LicenseManager (deprecated)
+ // NOTE(review): as with ClusterMaster above, errors for LicenseMaster
+ // objects will report Kind "LicenseManager" — confirm this is intended.
+ LicenseMasterGVR: &GenericValidator[*enterpriseApi.LicenseManager]{
+ ValidateCreateFunc: ValidateLicenseManagerCreate,
+ ValidateUpdateFunc: ValidateLicenseManagerUpdate,
+ WarningsOnCreateFunc: GetLicenseManagerWarningsOnCreate,
+ WarningsOnUpdateFunc: GetLicenseManagerWarningsOnUpdate,
+ GroupKind: schema.GroupKind{
+ Group: "enterprise.splunk.com",
+ Kind: "LicenseManager",
+ },
+ },
+
+ MonitoringConsoleGVR: &GenericValidator[*enterpriseApi.MonitoringConsole]{
+ ValidateCreateFunc: ValidateMonitoringConsoleCreate,
+ ValidateUpdateFunc: ValidateMonitoringConsoleUpdate,
+ WarningsOnCreateFunc: GetMonitoringConsoleWarningsOnCreate,
+ WarningsOnUpdateFunc: GetMonitoringConsoleWarningsOnUpdate,
+ GroupKind: schema.GroupKind{
+ Group: "enterprise.splunk.com",
+ Kind: "MonitoringConsole",
+ },
+ },
+}
diff --git a/pkg/splunk/enterprise/validation/searchheadcluster_validation.go b/pkg/splunk/enterprise/validation/searchheadcluster_validation.go
new file mode 100644
index 000000000..2950d5fcc
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/searchheadcluster_validation.go
@@ -0,0 +1,62 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "k8s.io/apimachinery/pkg/util/validation/field"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+)
+
+// ValidateSearchHeadClusterCreate validates a SearchHeadCluster on CREATE.
+//
+// Checks performed:
+//  1. the replica count must be at least 3;
+//  2. the checks shared by all Splunk CRDs (validateCommonSplunkSpec);
+//  3. AppFramework checks, but only when the user actually supplied an app
+//     repo (volumes or app sources) — an empty config is skipped entirely.
+//
+// The returned ErrorList is empty when the object is valid.
+func ValidateSearchHeadClusterCreate(obj *enterpriseApi.SearchHeadCluster) field.ErrorList {
+	specPath := field.NewPath("spec")
+
+	var errs field.ErrorList
+	if obj.Spec.Replicas < 3 {
+		errs = append(errs, field.Invalid(
+			specPath.Child("replicas"),
+			obj.Spec.Replicas,
+			"SearchHeadCluster requires at least 3 replicas"))
+	}
+
+	errs = append(errs, validateCommonSplunkSpec(&obj.Spec.CommonSplunkSpec, specPath)...)
+
+	// Only validate the app framework when the user provided some config.
+	appCfg := &obj.Spec.AppFrameworkConfig
+	if len(appCfg.VolList) > 0 || len(appCfg.AppSources) > 0 {
+		errs = append(errs, validateAppFramework(appCfg, specPath.Child("appRepo"))...)
+	}
+
+	return errs
+}
+
+// ValidateSearchHeadClusterUpdate validates a SearchHeadCluster on UPDATE.
+// Currently UPDATE re-runs the CREATE rules on the new object; oldObj is
+// accepted (and ignored) so immutable-field checks can be added later.
+// TODO: Add immutable field validation here (e.g., compare obj vs oldObj for fields that cannot change after creation)
+func ValidateSearchHeadClusterUpdate(obj, oldObj *enterpriseApi.SearchHeadCluster) field.ErrorList {
+ return ValidateSearchHeadClusterCreate(obj)
+}
+
+// GetSearchHeadClusterWarningsOnCreate returns warnings for SearchHeadCluster CREATE.
+// Delegates to getCommonWarnings; there are no SearchHeadCluster-specific warnings.
+func GetSearchHeadClusterWarningsOnCreate(obj *enterpriseApi.SearchHeadCluster) []string {
+ return getCommonWarnings(&obj.Spec.CommonSplunkSpec)
+}
+
+// GetSearchHeadClusterWarningsOnUpdate returns warnings for SearchHeadCluster UPDATE.
+// UPDATE produces the same warnings as CREATE; oldObj is currently unused.
+func GetSearchHeadClusterWarningsOnUpdate(obj, oldObj *enterpriseApi.SearchHeadCluster) []string {
+ return GetSearchHeadClusterWarningsOnCreate(obj)
+}
diff --git a/pkg/splunk/enterprise/validation/searchheadcluster_validation_test.go b/pkg/splunk/enterprise/validation/searchheadcluster_validation_test.go
new file mode 100644
index 000000000..3f1661b14
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/searchheadcluster_validation_test.go
@@ -0,0 +1,228 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "testing"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ "github.com/stretchr/testify/assert"
+)
+
+// TestValidateSearchHeadClusterCreate verifies the CREATE table: the
+// 3-replica minimum, AppFramework source name/location requirements, and
+// error accumulation (the final case expects three combined errors).
+func TestValidateSearchHeadClusterCreate(t *testing.T) {
+ tests := []struct {
+ name string
+ obj *enterpriseApi.SearchHeadCluster
+ wantErrCount int
+ wantErrField string
+ }{
+ {
+ name: "valid search head cluster - minimal",
+ obj: &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: 3,
+ },
+ },
+ wantErrCount: 0,
+ },
+ {
+ name: "invalid search head cluster - zero replicas",
+ obj: &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: 0,
+ },
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.replicas",
+ },
+ {
+ name: "invalid search head cluster - less than 3 replicas",
+ obj: &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: 2,
+ },
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.replicas",
+ },
+ {
+ name: "invalid search head cluster - negative replicas",
+ obj: &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: -1,
+ },
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.replicas",
+ },
+ {
+ name: "valid search head cluster - with AppFramework",
+ obj: &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: 3,
+ AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{
+ VolList: []enterpriseApi.VolumeSpec{
+ {Name: "appvol", Endpoint: "s3://apps"},
+ },
+ AppSources: []enterpriseApi.AppSourceSpec{
+ {Name: "apps", Location: "/apps"},
+ },
+ },
+ },
+ },
+ wantErrCount: 0,
+ },
+ {
+ name: "invalid search head cluster - AppFramework source without name",
+ obj: &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: 3,
+ AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{
+ AppSources: []enterpriseApi.AppSourceSpec{
+ {Name: "", Location: "/apps"},
+ },
+ },
+ },
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.appRepo.appSources[0].name",
+ },
+ {
+ name: "invalid search head cluster - AppFramework source without location",
+ obj: &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: 3,
+ AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{
+ AppSources: []enterpriseApi.AppSourceSpec{
+ {Name: "apps", Location: ""},
+ },
+ },
+ },
+ },
+ wantErrCount: 1,
+ wantErrField: "spec.appRepo.appSources[0].location",
+ },
+ {
+ name: "invalid search head cluster - multiple errors",
+ obj: &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: -1,
+ AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{
+ AppSources: []enterpriseApi.AppSourceSpec{
+ {Name: "", Location: ""},
+ },
+ },
+ },
+ },
+ wantErrCount: 3, // negative replicas + missing name + missing location
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ errs := ValidateSearchHeadClusterCreate(tt.obj)
+ assert.Len(t, errs, tt.wantErrCount, "unexpected error count")
+ if tt.wantErrField != "" && len(errs) > 0 {
+ assert.Equal(t, tt.wantErrField, errs[0].Field, "unexpected error field")
+ }
+ })
+ }
+}
+
+// TestValidateSearchHeadClusterUpdate verifies UPDATE behaves like CREATE on
+// the new object: holding or raising the replica count passes, negative
+// replicas fail regardless of the old object.
+func TestValidateSearchHeadClusterUpdate(t *testing.T) {
+ tests := []struct {
+ name string
+ obj *enterpriseApi.SearchHeadCluster
+ oldObj *enterpriseApi.SearchHeadCluster
+ wantErrCount int
+ }{
+ {
+ name: "valid update - same replicas",
+ obj: &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: 3,
+ },
+ },
+ oldObj: &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: 3,
+ },
+ },
+ wantErrCount: 0,
+ },
+ {
+ name: "valid update - scale up",
+ obj: &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: 5,
+ },
+ },
+ oldObj: &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: 3,
+ },
+ },
+ wantErrCount: 0,
+ },
+ {
+ name: "invalid update - negative replicas",
+ obj: &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: -1,
+ },
+ },
+ oldObj: &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: 3,
+ },
+ },
+ wantErrCount: 1,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ errs := ValidateSearchHeadClusterUpdate(tt.obj, tt.oldObj)
+ assert.Len(t, errs, tt.wantErrCount, "unexpected error count")
+ })
+ }
+}
+
+// TestGetSearchHeadClusterWarningsOnCreate: a minimal valid object produces
+// no CREATE warnings.
+func TestGetSearchHeadClusterWarningsOnCreate(t *testing.T) {
+ obj := &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: 3,
+ },
+ }
+ warnings := GetSearchHeadClusterWarningsOnCreate(obj)
+ assert.Empty(t, warnings, "expected no warnings")
+}
+
+// TestGetSearchHeadClusterWarningsOnUpdate: an unchanged valid object
+// produces no UPDATE warnings.
+func TestGetSearchHeadClusterWarningsOnUpdate(t *testing.T) {
+ obj := &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: 3,
+ },
+ }
+ oldObj := &enterpriseApi.SearchHeadCluster{
+ Spec: enterpriseApi.SearchHeadClusterSpec{
+ Replicas: 3,
+ },
+ }
+ warnings := GetSearchHeadClusterWarningsOnUpdate(obj, oldObj)
+ assert.Empty(t, warnings, "expected no warnings")
+}
diff --git a/pkg/splunk/enterprise/validation/server.go b/pkg/splunk/enterprise/validation/server.go
new file mode 100644
index 000000000..9f8429d8f
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/server.go
@@ -0,0 +1,223 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"context"
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"path/filepath"
+	"time"
+
+	admissionv1 "k8s.io/api/admission/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// serverLog is the package-level logger for webhook server lifecycle
+// events; request-scoped logging uses log.FromContext instead.
+var serverLog = ctrl.Log.WithName("webhook-server")
+
+// WebhookServerOptions contains configuration for the webhook server.
+type WebhookServerOptions struct {
+	// TLSCertFile is the path to the TLS certificate file. When set,
+	// it takes precedence over CertDir (see Start).
+	TLSCertFile string
+
+	// TLSKeyFile is the path to the TLS key file.
+	TLSKeyFile string
+
+	// Port is the TCP port the listener binds to.
+	Port int
+
+	// Validators is the map of validators by GVR; admission requests for
+	// resources without an entry are rejected by Validate.
+	Validators map[schema.GroupVersionResource]Validator
+
+	// CertDir is the directory containing tls.crt and tls.key. Used only
+	// when TLSCertFile is empty; if neither is set, the server serves
+	// plain HTTP (see Start).
+	CertDir string
+
+	// ReadTimeout is the maximum duration for reading the entire request (default: 10s)
+	ReadTimeout time.Duration
+
+	// WriteTimeout is the maximum duration before timing out writes of the response (default: 10s)
+	WriteTimeout time.Duration
+}
+
+// WebhookServer is the HTTP server for validation webhooks.
+type WebhookServer struct {
+	// options holds the configuration supplied to NewWebhookServer.
+	options WebhookServerOptions
+	// httpServer is created lazily by Start and used for graceful Shutdown.
+	httpServer *http.Server
+}
+
+// NewWebhookServer constructs a WebhookServer from the supplied options.
+// The underlying http.Server is not created until Start is called.
+func NewWebhookServer(options WebhookServerOptions) *WebhookServer {
+	server := new(WebhookServer)
+	server.options = options
+	return server
+}
+
+// Start runs the webhook server until ctx is cancelled or the listener
+// fails. TLS is used when certificate paths are configured — either
+// explicitly via TLSCertFile/TLSKeyFile or implicitly via CertDir —
+// otherwise the server falls back to plain HTTP. On cancellation the
+// server is given 5 seconds to shut down gracefully.
+func (s *WebhookServer) Start(ctx context.Context) error {
+	mux := http.NewServeMux()
+
+	// Register validation endpoint
+	mux.HandleFunc("/validate", s.handleValidate)
+
+	// Register health check endpoint
+	mux.HandleFunc("/readyz", s.handleReadyz)
+
+	// Determine cert and key paths; explicit file paths take precedence
+	// over CertDir. Use filepath.Join for OS-correct path construction.
+	certFile := s.options.TLSCertFile
+	keyFile := s.options.TLSKeyFile
+	if certFile == "" && s.options.CertDir != "" {
+		certFile = filepath.Join(s.options.CertDir, "tls.crt")
+		keyFile = filepath.Join(s.options.CertDir, "tls.key")
+	}
+
+	// Configure TLS: require at least TLS 1.2 for webhook traffic.
+	tlsConfig := &tls.Config{
+		MinVersion: tls.VersionTLS12,
+	}
+
+	// Use configured timeouts or defaults
+	readTimeout := s.options.ReadTimeout
+	if readTimeout == 0 {
+		readTimeout = 10 * time.Second
+	}
+	writeTimeout := s.options.WriteTimeout
+	if writeTimeout == 0 {
+		writeTimeout = 10 * time.Second
+	}
+
+	s.httpServer = &http.Server{
+		Addr:         fmt.Sprintf(":%d", s.options.Port),
+		Handler:      mux,
+		TLSConfig:    tlsConfig,
+		ReadTimeout:  readTimeout,
+		WriteTimeout: writeTimeout,
+	}
+
+	serverLog.Info("Starting webhook server", "port", s.options.Port)
+
+	// Run the listener in a goroutine so we can select on ctx below.
+	// errChan is buffered so the goroutine never blocks on send and
+	// cannot leak after shutdown.
+	errChan := make(chan error, 1)
+	go func() {
+		if certFile != "" && keyFile != "" {
+			errChan <- s.httpServer.ListenAndServeTLS(certFile, keyFile)
+		} else {
+			errChan <- s.httpServer.ListenAndServe()
+		}
+	}()
+
+	// Wait for context cancellation or server error
+	select {
+	case <-ctx.Done():
+		serverLog.Info("Shutting down webhook server")
+		shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+		defer cancel()
+		return s.httpServer.Shutdown(shutdownCtx)
+	case err := <-errChan:
+		return err
+	}
+}
+
+// handleValidate handles admission validation requests posted by the
+// Kubernetes API server. It decodes the AdmissionReview, dispatches to
+// the registered validators via Validate, and writes back an
+// AdmissionReview whose Response carries the allow/deny decision plus
+// any warnings.
+func (s *WebhookServer) handleValidate(w http.ResponseWriter, r *http.Request) {
+	reqLog := log.FromContext(r.Context()).WithName("webhook-server")
+	reqLog.V(1).Info("Received validation request", "method", r.Method, "path", r.URL.Path)
+
+	if r.Method != http.MethodPost {
+		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+		return
+	}
+
+	defer r.Body.Close()
+	body, err := io.ReadAll(r.Body)
+	if err != nil {
+		reqLog.Error(err, "Failed to read request body")
+		http.Error(w, "Failed to read request body", http.StatusBadRequest)
+		return
+	}
+
+	var admissionReview admissionv1.AdmissionReview
+	if err := json.Unmarshal(body, &admissionReview); err != nil {
+		reqLog.Error(err, "Failed to decode admission review")
+		http.Error(w, "Failed to decode admission review", http.StatusBadRequest)
+		return
+	}
+
+	// BUGFIX: a syntactically valid AdmissionReview without a request
+	// previously caused a nil-pointer panic below when reading
+	// admissionReview.Request.UID. Reject such payloads early instead.
+	if admissionReview.Request == nil {
+		reqLog.Info("Admission review contains no request")
+		http.Error(w, "Admission review request is nil", http.StatusBadRequest)
+		return
+	}
+
+	reqLog.Info("Processing admission request",
+		"kind", admissionReview.Request.Kind.Kind,
+		"name", admissionReview.Request.Name,
+		"namespace", admissionReview.Request.Namespace,
+		"operation", admissionReview.Request.Operation,
+		"user", admissionReview.Request.UserInfo.Username)
+
+	warnings, validationErr := Validate(&admissionReview, s.options.Validators)
+
+	// The response must echo the request UID so the API server can
+	// correlate it.
+	response := &admissionv1.AdmissionResponse{
+		UID: admissionReview.Request.UID,
+	}
+
+	if validationErr != nil {
+		reqLog.Info("Validation failed",
+			"kind", admissionReview.Request.Kind.Kind,
+			"name", admissionReview.Request.Name,
+			"error", validationErr.Error())
+		response.Allowed = false
+		response.Result = &metav1.Status{
+			Status:  metav1.StatusFailure,
+			Message: validationErr.Error(),
+			Reason:  metav1.StatusReasonInvalid,
+			Code:    http.StatusUnprocessableEntity,
+		}
+	} else {
+		response.Allowed = true
+		response.Result = &metav1.Status{
+			Status: metav1.StatusSuccess,
+			Code:   http.StatusOK,
+		}
+	}
+
+	if len(warnings) > 0 {
+		response.Warnings = warnings
+	}
+
+	responseReview := admissionv1.AdmissionReview{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "admission.k8s.io/v1",
+			Kind:       "AdmissionReview",
+		},
+		Response: response,
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	if err := json.NewEncoder(w).Encode(responseReview); err != nil {
+		// Headers were already written by Encode, so http.Error can only
+		// append to the body here; log the failure and bail out.
+		serverLog.Error(err, "Failed to encode response")
+		http.Error(w, "Failed to encode response", http.StatusInternalServerError)
+		return
+	}
+}
+
+// handleReadyz answers readiness probes with 200/"ok" for any method.
+func (s *WebhookServer) handleReadyz(w http.ResponseWriter, r *http.Request) {
+	w.WriteHeader(http.StatusOK)
+	// Best-effort body write; the 200 status alone satisfies the probe.
+	_, _ = w.Write([]byte("ok"))
+}
diff --git a/pkg/splunk/enterprise/validation/server_test.go b/pkg/splunk/enterprise/validation/server_test.go
new file mode 100644
index 000000000..27b7ebb32
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/server_test.go
@@ -0,0 +1,462 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "bytes"
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ admissionv1 "k8s.io/api/admission/v1"
+ authenticationv1 "k8s.io/api/authentication/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+)
+
+// TestNewWebhookServer checks that constructor options are stored verbatim.
+func TestNewWebhookServer(t *testing.T) {
+	server := NewWebhookServer(WebhookServerOptions{
+		Port:    9443,
+		CertDir: "/tmp/certs",
+	})
+
+	if server == nil {
+		t.Fatal("expected non-nil server")
+	}
+	if got := server.options.Port; got != 9443 {
+		t.Errorf("expected port 9443, got %d", got)
+	}
+	if got := server.options.CertDir; got != "/tmp/certs" {
+		t.Errorf("expected certDir /tmp/certs, got %s", got)
+	}
+}
+
+// TestHandleValidate drives the /validate HTTP handler end to end using
+// a stub Standalone validator: method filtering, malformed JSON
+// handling, allow/deny decisions, and rejection of resources with no
+// registered validator.
+func TestHandleValidate(t *testing.T) {
+	// Create test validators
+	validators := map[schema.GroupVersionResource]Validator{
+		StandaloneGVR: &GenericValidator[*enterpriseApi.Standalone]{
+			ValidateCreateFunc: func(obj *enterpriseApi.Standalone) field.ErrorList {
+				var allErrs field.ErrorList
+				if obj.Spec.Replicas < 0 {
+					allErrs = append(allErrs, field.Invalid(
+						field.NewPath("spec").Child("replicas"),
+						obj.Spec.Replicas,
+						"replicas must be non-negative"))
+				}
+				return allErrs
+			},
+			ValidateUpdateFunc: func(obj, oldObj *enterpriseApi.Standalone) field.ErrorList {
+				return nil
+			},
+			GroupKind: schema.GroupKind{Group: "enterprise.splunk.com", Kind: "Standalone"},
+		},
+	}
+
+	server := NewWebhookServer(WebhookServerOptions{
+		Port:       9443,
+		Validators: validators,
+	})
+
+	tests := []struct {
+		name           string
+		method         string
+		body           interface{} // string bodies are sent raw; others are JSON-marshaled
+		wantStatusCode int
+		wantAllowed    bool
+		checkResponse  bool // false for plain-HTTP error cases with no AdmissionReview body
+	}{
+		{
+			name:           "method not allowed - GET",
+			method:         http.MethodGet,
+			body:           nil,
+			wantStatusCode: http.StatusMethodNotAllowed,
+			checkResponse:  false,
+		},
+		{
+			name:           "method not allowed - PUT",
+			method:         http.MethodPut,
+			body:           nil,
+			wantStatusCode: http.StatusMethodNotAllowed,
+			checkResponse:  false,
+		},
+		{
+			name:           "invalid JSON body",
+			method:         http.MethodPost,
+			body:           "not valid json",
+			wantStatusCode: http.StatusBadRequest,
+			checkResponse:  false,
+		},
+		{
+			name:   "valid CREATE - allowed",
+			method: http.MethodPost,
+			body: &admissionv1.AdmissionReview{
+				TypeMeta: metav1.TypeMeta{
+					APIVersion: "admission.k8s.io/v1",
+					Kind:       "AdmissionReview",
+				},
+				Request: &admissionv1.AdmissionRequest{
+					UID: "test-uid-1",
+					Kind: metav1.GroupVersionKind{
+						Group:   "enterprise.splunk.com",
+						Version: "v4",
+						Kind:    "Standalone",
+					},
+					Resource: metav1.GroupVersionResource{
+						Group:    "enterprise.splunk.com",
+						Version:  "v4",
+						Resource: "standalones",
+					},
+					Name:      "test-standalone",
+					Namespace: "default",
+					Operation: admissionv1.Create,
+					Object: runtime.RawExtension{
+						Raw: mustMarshal(&enterpriseApi.Standalone{
+							TypeMeta: metav1.TypeMeta{
+								APIVersion: "enterprise.splunk.com/v4",
+								Kind:       "Standalone",
+							},
+							ObjectMeta: metav1.ObjectMeta{
+								Name:      "test-standalone",
+								Namespace: "default",
+							},
+							Spec: enterpriseApi.StandaloneSpec{
+								Replicas: 1,
+							},
+						}),
+					},
+					UserInfo: authenticationv1.UserInfo{Username: "test-user"},
+				},
+			},
+			wantStatusCode: http.StatusOK,
+			wantAllowed:    true,
+			checkResponse:  true,
+		},
+		{
+			name:   "invalid CREATE - negative replicas",
+			method: http.MethodPost,
+			body: &admissionv1.AdmissionReview{
+				TypeMeta: metav1.TypeMeta{
+					APIVersion: "admission.k8s.io/v1",
+					Kind:       "AdmissionReview",
+				},
+				Request: &admissionv1.AdmissionRequest{
+					UID: "test-uid-2",
+					Kind: metav1.GroupVersionKind{
+						Group:   "enterprise.splunk.com",
+						Version: "v4",
+						Kind:    "Standalone",
+					},
+					Resource: metav1.GroupVersionResource{
+						Group:    "enterprise.splunk.com",
+						Version:  "v4",
+						Resource: "standalones",
+					},
+					Name:      "test-standalone",
+					Namespace: "default",
+					Operation: admissionv1.Create,
+					Object: runtime.RawExtension{
+						Raw: mustMarshal(&enterpriseApi.Standalone{
+							TypeMeta: metav1.TypeMeta{
+								APIVersion: "enterprise.splunk.com/v4",
+								Kind:       "Standalone",
+							},
+							ObjectMeta: metav1.ObjectMeta{
+								Name:      "test-standalone",
+								Namespace: "default",
+							},
+							Spec: enterpriseApi.StandaloneSpec{
+								Replicas: -1,
+							},
+						}),
+					},
+					UserInfo: authenticationv1.UserInfo{Username: "test-user"},
+				},
+			},
+			wantStatusCode: http.StatusOK,
+			wantAllowed:    false,
+			checkResponse:  true,
+		},
+		{
+			name:   "unknown resource - no validator - rejected",
+			method: http.MethodPost,
+			body: &admissionv1.AdmissionReview{
+				TypeMeta: metav1.TypeMeta{
+					APIVersion: "admission.k8s.io/v1",
+					Kind:       "AdmissionReview",
+				},
+				Request: &admissionv1.AdmissionRequest{
+					UID: "test-uid-3",
+					Kind: metav1.GroupVersionKind{
+						Group:   "enterprise.splunk.com",
+						Version: "v4",
+						Kind:    "Unknown",
+					},
+					Resource: metav1.GroupVersionResource{
+						Group:    "enterprise.splunk.com",
+						Version:  "v4",
+						Resource: "unknowns",
+					},
+					Name:      "test-unknown",
+					Namespace: "default",
+					Operation: admissionv1.Create,
+					Object: runtime.RawExtension{
+						Raw: []byte(`{"apiVersion":"enterprise.splunk.com/v4","kind":"Unknown"}`),
+					},
+					UserInfo: authenticationv1.UserInfo{Username: "test-user"},
+				},
+			},
+			wantStatusCode: http.StatusOK,
+			wantAllowed:    false,
+			checkResponse:  true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var bodyBytes []byte
+			var err error
+
+			if tt.body != nil {
+				switch v := tt.body.(type) {
+				case string:
+					bodyBytes = []byte(v)
+				default:
+					bodyBytes, err = json.Marshal(tt.body)
+					if err != nil {
+						t.Fatalf("failed to marshal body: %v", err)
+					}
+				}
+			}
+
+			req := httptest.NewRequest(tt.method, "/validate", bytes.NewReader(bodyBytes))
+			req.Header.Set("Content-Type", "application/json")
+
+			rr := httptest.NewRecorder()
+			server.handleValidate(rr, req)
+
+			if rr.Code != tt.wantStatusCode {
+				t.Errorf("expected status code %d, got %d", tt.wantStatusCode, rr.Code)
+			}
+
+			if tt.checkResponse {
+				var response admissionv1.AdmissionReview
+				if err := json.Unmarshal(rr.Body.Bytes(), &response); err != nil {
+					t.Fatalf("failed to unmarshal response: %v", err)
+				}
+
+				if response.Response == nil {
+					t.Fatal("expected non-nil response")
+				}
+
+				if response.Response.Allowed != tt.wantAllowed {
+					t.Errorf("expected allowed=%v, got %v", tt.wantAllowed, response.Response.Allowed)
+				}
+			}
+		})
+	}
+}
+
+// TestHandleReadyz verifies the readiness endpoint returns 200/"ok"
+// regardless of HTTP method.
+func TestHandleReadyz(t *testing.T) {
+	server := NewWebhookServer(WebhookServerOptions{Port: 9443})
+
+	cases := map[string]string{
+		"GET request":  http.MethodGet,
+		"POST request": http.MethodPost,
+	}
+
+	for name, method := range cases {
+		t.Run(name, func(t *testing.T) {
+			rr := httptest.NewRecorder()
+			server.handleReadyz(rr, httptest.NewRequest(method, "/readyz", nil))
+
+			if rr.Code != http.StatusOK {
+				t.Errorf("expected status code %d, got %d", http.StatusOK, rr.Code)
+			}
+			if rr.Body.String() != "ok" {
+				t.Errorf("expected body %q, got %q", "ok", rr.Body.String())
+			}
+		})
+	}
+}
+
+// TestHandleValidateWithWarnings checks that warnings emitted by a
+// validator's WarningsOnCreateFunc are propagated into the
+// AdmissionReview response alongside an allowed decision.
+func TestHandleValidateWithWarnings(t *testing.T) {
+	validators := map[schema.GroupVersionResource]Validator{
+		StandaloneGVR: &GenericValidator[*enterpriseApi.Standalone]{
+			ValidateCreateFunc: func(obj *enterpriseApi.Standalone) field.ErrorList {
+				return nil
+			},
+			WarningsOnCreateFunc: func(obj *enterpriseApi.Standalone) []string {
+				return []string{"test warning 1", "test warning 2"}
+			},
+			GroupKind: schema.GroupKind{Group: "enterprise.splunk.com", Kind: "Standalone"},
+		},
+	}
+
+	server := NewWebhookServer(WebhookServerOptions{
+		Port:       9443,
+		Validators: validators,
+	})
+
+	ar := &admissionv1.AdmissionReview{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "admission.k8s.io/v1",
+			Kind:       "AdmissionReview",
+		},
+		Request: &admissionv1.AdmissionRequest{
+			UID: "test-uid-warnings",
+			Kind: metav1.GroupVersionKind{
+				Group:   "enterprise.splunk.com",
+				Version: "v4",
+				Kind:    "Standalone",
+			},
+			Resource: metav1.GroupVersionResource{
+				Group:    "enterprise.splunk.com",
+				Version:  "v4",
+				Resource: "standalones",
+			},
+			Name:      "test-standalone",
+			Namespace: "default",
+			Operation: admissionv1.Create,
+			Object: runtime.RawExtension{
+				Raw: mustMarshal(&enterpriseApi.Standalone{
+					TypeMeta: metav1.TypeMeta{
+						APIVersion: "enterprise.splunk.com/v4",
+						Kind:       "Standalone",
+					},
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-standalone",
+						Namespace: "default",
+					},
+					Spec: enterpriseApi.StandaloneSpec{
+						Replicas: 1,
+					},
+				}),
+			},
+			UserInfo: authenticationv1.UserInfo{Username: "test-user"},
+		},
+	}
+
+	bodyBytes, _ := json.Marshal(ar)
+	req := httptest.NewRequest(http.MethodPost, "/validate", bytes.NewReader(bodyBytes))
+	req.Header.Set("Content-Type", "application/json")
+
+	rr := httptest.NewRecorder()
+	server.handleValidate(rr, req)
+
+	if rr.Code != http.StatusOK {
+		t.Errorf("expected status code %d, got %d", http.StatusOK, rr.Code)
+	}
+
+	var response admissionv1.AdmissionReview
+	if err := json.Unmarshal(rr.Body.Bytes(), &response); err != nil {
+		t.Fatalf("failed to unmarshal response: %v", err)
+	}
+
+	if !response.Response.Allowed {
+		t.Error("expected allowed=true")
+	}
+
+	if len(response.Response.Warnings) != 2 {
+		t.Errorf("expected 2 warnings, got %d", len(response.Response.Warnings))
+	}
+}
+
+// TestWebhookServerOptions verifies that NewWebhookServer stores the
+// configured port for a variety of option combinations.
+func TestWebhookServerOptions(t *testing.T) {
+	cases := []struct {
+		name     string
+		options  WebhookServerOptions
+		wantPort int
+	}{
+		{"default options", WebhookServerOptions{Port: 9443}, 9443},
+		{"custom port", WebhookServerOptions{Port: 8443}, 8443},
+		{"with cert paths", WebhookServerOptions{
+			Port:        9443,
+			TLSCertFile: "/path/to/cert.pem",
+			TLSKeyFile:  "/path/to/key.pem",
+		}, 9443},
+		{"with cert dir", WebhookServerOptions{
+			Port:    9443,
+			CertDir: "/tmp/k8s-webhook-server/serving-certs",
+		}, 9443},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			if got := NewWebhookServer(tc.options).options.Port; got != tc.wantPort {
+				t.Errorf("expected port %d, got %d", tc.wantPort, got)
+			}
+		})
+	}
+}
+
+// Helper functions
+
+// mustMarshal JSON-encodes obj, panicking on failure. Test-only helper:
+// a marshal error aborts setup immediately instead of being ignored.
+func mustMarshal(obj interface{}) []byte {
+	encoded, err := json.Marshal(obj)
+	if err != nil {
+		panic(err)
+	}
+	return encoded
+}
diff --git a/pkg/splunk/enterprise/validation/standalone_validation.go b/pkg/splunk/enterprise/validation/standalone_validation.go
new file mode 100644
index 000000000..c1ac50516
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/standalone_validation.go
@@ -0,0 +1,67 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "k8s.io/apimachinery/pkg/util/validation/field"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+)
+
+// ValidateStandaloneCreate validates a Standalone on CREATE.
+// It checks the replica count, the common Splunk spec, and — only when
+// the user actually configured them — the SmartStore and AppFramework
+// sections.
+func ValidateStandaloneCreate(obj *enterpriseApi.Standalone) field.ErrorList {
+	specPath := field.NewPath("spec")
+
+	var errs field.ErrorList
+	if obj.Spec.Replicas < 0 {
+		errs = append(errs, field.Invalid(
+			specPath.Child("replicas"),
+			obj.Spec.Replicas,
+			"replicas must be non-negative"))
+	}
+
+	// Spec fields shared by every Splunk CR.
+	errs = append(errs, validateCommonSplunkSpec(&obj.Spec.CommonSplunkSpec, specPath)...)
+
+	// SmartStore is optional; validate only when the user supplied config.
+	smartStore := &obj.Spec.SmartStore
+	if len(smartStore.VolList) > 0 || len(smartStore.IndexList) > 0 {
+		errs = append(errs, validateSmartStore(smartStore, specPath.Child("smartstore"))...)
+	}
+
+	// AppFramework is optional; validate only when the user supplied config.
+	appFramework := &obj.Spec.AppFrameworkConfig
+	if len(appFramework.VolList) > 0 || len(appFramework.AppSources) > 0 {
+		errs = append(errs, validateAppFramework(appFramework, specPath.Child("appRepo"))...)
+	}
+
+	return errs
+}
+
+// ValidateStandaloneUpdate validates a Standalone on UPDATE.
+// Currently identical to CREATE validation: the whole new spec is
+// re-validated and oldObj is not yet consulted.
+// TODO: Add immutable field validation here (e.g., compare obj vs oldObj for fields that cannot change after creation)
+func ValidateStandaloneUpdate(obj, oldObj *enterpriseApi.Standalone) field.ErrorList {
+	return ValidateStandaloneCreate(obj)
+}
+
+// GetStandaloneWarningsOnCreate returns warnings for Standalone CREATE.
+// Delegates to the warnings shared by all Splunk CR common specs.
+func GetStandaloneWarningsOnCreate(obj *enterpriseApi.Standalone) []string {
+	return getCommonWarnings(&obj.Spec.CommonSplunkSpec)
+}
+
+// GetStandaloneWarningsOnUpdate returns warnings for Standalone UPDATE.
+// UPDATE warnings currently mirror CREATE warnings; oldObj is not yet
+// consulted.
+func GetStandaloneWarningsOnUpdate(obj, oldObj *enterpriseApi.Standalone) []string {
+	return GetStandaloneWarningsOnCreate(obj)
+}
diff --git a/pkg/splunk/enterprise/validation/standalone_validation_test.go b/pkg/splunk/enterprise/validation/standalone_validation_test.go
new file mode 100644
index 000000000..a2fb9bfd1
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/standalone_validation_test.go
@@ -0,0 +1,234 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "testing"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ "github.com/stretchr/testify/assert"
+)
+
+// TestValidateStandaloneCreate covers CREATE validation of Standalone:
+// replica bounds, optional SmartStore and AppFramework sections, and
+// accumulation of multiple field errors.
+func TestValidateStandaloneCreate(t *testing.T) {
+	tests := []struct {
+		name         string
+		obj          *enterpriseApi.Standalone
+		wantErrCount int
+		wantErrField string // checked against the first error only, when set
+	}{
+		{
+			name: "valid standalone - minimal",
+			obj: &enterpriseApi.Standalone{
+				Spec: enterpriseApi.StandaloneSpec{
+					Replicas: 1,
+				},
+			},
+			wantErrCount: 0,
+		},
+		{
+			name: "valid standalone - zero replicas",
+			obj: &enterpriseApi.Standalone{
+				Spec: enterpriseApi.StandaloneSpec{
+					Replicas: 0,
+				},
+			},
+			wantErrCount: 0,
+		},
+		{
+			name: "invalid standalone - negative replicas",
+			obj: &enterpriseApi.Standalone{
+				Spec: enterpriseApi.StandaloneSpec{
+					Replicas: -1,
+				},
+			},
+			wantErrCount: 1,
+			wantErrField: "spec.replicas",
+		},
+		{
+			name: "valid standalone - with SmartStore",
+			obj: &enterpriseApi.Standalone{
+				Spec: enterpriseApi.StandaloneSpec{
+					Replicas: 1,
+					SmartStore: enterpriseApi.SmartStoreSpec{
+						VolList: []enterpriseApi.VolumeSpec{
+							{Name: "vol1", Endpoint: "s3://bucket"},
+						},
+						IndexList: []enterpriseApi.IndexSpec{
+							{Name: "idx1", IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{VolName: "vol1"}},
+						},
+					},
+				},
+			},
+			wantErrCount: 0,
+		},
+		{
+			name: "invalid standalone - SmartStore volume without name",
+			obj: &enterpriseApi.Standalone{
+				Spec: enterpriseApi.StandaloneSpec{
+					Replicas: 1,
+					SmartStore: enterpriseApi.SmartStoreSpec{
+						VolList: []enterpriseApi.VolumeSpec{
+							{Name: "", Endpoint: "s3://bucket"},
+						},
+					},
+				},
+			},
+			wantErrCount: 1,
+			wantErrField: "spec.smartstore.volumes[0].name",
+		},
+		{
+			name: "valid standalone - with AppFramework",
+			obj: &enterpriseApi.Standalone{
+				Spec: enterpriseApi.StandaloneSpec{
+					Replicas: 1,
+					AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{
+						VolList: []enterpriseApi.VolumeSpec{
+							{Name: "appvol", Endpoint: "s3://apps"},
+						},
+						AppSources: []enterpriseApi.AppSourceSpec{
+							{Name: "apps", Location: "/apps"},
+						},
+					},
+				},
+			},
+			wantErrCount: 0,
+		},
+		{
+			name: "invalid standalone - AppFramework source without name",
+			obj: &enterpriseApi.Standalone{
+				Spec: enterpriseApi.StandaloneSpec{
+					Replicas: 1,
+					AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{
+						AppSources: []enterpriseApi.AppSourceSpec{
+							{Name: "", Location: "/apps"},
+						},
+					},
+				},
+			},
+			wantErrCount: 1,
+			wantErrField: "spec.appRepo.appSources[0].name",
+		},
+		{
+			name: "invalid standalone - multiple errors",
+			obj: &enterpriseApi.Standalone{
+				Spec: enterpriseApi.StandaloneSpec{
+					Replicas: -1,
+					SmartStore: enterpriseApi.SmartStoreSpec{
+						VolList: []enterpriseApi.VolumeSpec{
+							{Name: "", Endpoint: ""},
+						},
+					},
+				},
+			},
+			wantErrCount: 3, // negative replicas + missing name + missing endpoint/path
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			errs := ValidateStandaloneCreate(tt.obj)
+			assert.Len(t, errs, tt.wantErrCount, "unexpected error count")
+			if tt.wantErrField != "" && len(errs) > 0 {
+				assert.Equal(t, tt.wantErrField, errs[0].Field, "unexpected error field")
+			}
+		})
+	}
+}
+
+// TestValidateStandaloneUpdate covers UPDATE validation of Standalone;
+// since UPDATE currently re-runs CREATE validation, only the new
+// object's fields determine the outcome.
+func TestValidateStandaloneUpdate(t *testing.T) {
+	tests := []struct {
+		name         string
+		obj          *enterpriseApi.Standalone
+		oldObj       *enterpriseApi.Standalone
+		wantErrCount int
+	}{
+		{
+			name: "valid update - same replicas",
+			obj: &enterpriseApi.Standalone{
+				Spec: enterpriseApi.StandaloneSpec{
+					Replicas: 1,
+				},
+			},
+			oldObj: &enterpriseApi.Standalone{
+				Spec: enterpriseApi.StandaloneSpec{
+					Replicas: 1,
+				},
+			},
+			wantErrCount: 0,
+		},
+		{
+			name: "valid update - increase replicas",
+			obj: &enterpriseApi.Standalone{
+				Spec: enterpriseApi.StandaloneSpec{
+					Replicas: 3,
+				},
+			},
+			oldObj: &enterpriseApi.Standalone{
+				Spec: enterpriseApi.StandaloneSpec{
+					Replicas: 1,
+				},
+			},
+			wantErrCount: 0,
+		},
+		{
+			name: "invalid update - negative replicas",
+			obj: &enterpriseApi.Standalone{
+				Spec: enterpriseApi.StandaloneSpec{
+					Replicas: -1,
+				},
+			},
+			oldObj: &enterpriseApi.Standalone{
+				Spec: enterpriseApi.StandaloneSpec{
+					Replicas: 1,
+				},
+			},
+			wantErrCount: 1,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			errs := ValidateStandaloneUpdate(tt.obj, tt.oldObj)
+			assert.Len(t, errs, tt.wantErrCount, "unexpected error count")
+		})
+	}
+}
+
+// TestGetStandaloneWarningsOnCreate checks that a minimal spec yields
+// no admission warnings on CREATE.
+func TestGetStandaloneWarningsOnCreate(t *testing.T) {
+	standalone := &enterpriseApi.Standalone{
+		Spec: enterpriseApi.StandaloneSpec{Replicas: 1},
+	}
+
+	assert.Empty(t, GetStandaloneWarningsOnCreate(standalone), "expected no warnings")
+}
+
+// TestGetStandaloneWarningsOnUpdate checks that an unchanged minimal
+// spec yields no admission warnings on UPDATE.
+func TestGetStandaloneWarningsOnUpdate(t *testing.T) {
+	newObj := &enterpriseApi.Standalone{
+		Spec: enterpriseApi.StandaloneSpec{Replicas: 1},
+	}
+	oldObj := &enterpriseApi.Standalone{
+		Spec: enterpriseApi.StandaloneSpec{Replicas: 1},
+	}
+
+	assert.Empty(t, GetStandaloneWarningsOnUpdate(newObj, oldObj), "expected no warnings")
+}
diff --git a/pkg/splunk/enterprise/validation/validate.go b/pkg/splunk/enterprise/validation/validate.go
new file mode 100644
index 000000000..e2c8e6fa6
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/validate.go
@@ -0,0 +1,120 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "fmt"
+
+ admissionv1 "k8s.io/api/admission/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+)
+
+var (
+	// scheme registers the enterprise API types for decoding.
+	scheme = runtime.NewScheme()
+	// codecs provides the deserializer used by deserializeObject.
+	codecs serializer.CodecFactory
+)
+
+func init() {
+	// Register the enterprise API group so raw admission payloads can be
+	// decoded into typed objects. AddToScheme errors only on conflicting
+	// registration, so the error is deliberately ignored here.
+	_ = enterpriseApi.AddToScheme(scheme)
+	codecs = serializer.NewCodecFactory(scheme)
+}
+
+// Validate performs validation on an AdmissionReview request.
+// It looks up the validator registered for the request's resource,
+// decodes the new (and, for UPDATE, the old) object, and dispatches to
+// the CREATE or UPDATE validation hooks.
+// Returns warnings (even on success) and a non-nil error when the
+// request is malformed, the resource has no validator, or validation
+// fails.
+func Validate(ar *admissionv1.AdmissionReview, validators map[schema.GroupVersionResource]Validator) ([]string, error) {
+	if ar == nil || ar.Request == nil {
+		return nil, fmt.Errorf("admission review or request is nil")
+	}
+
+	req := ar.Request
+
+	// Extract GVR from request
+	gvr := schema.GroupVersionResource{
+		Group:    req.Resource.Group,
+		Version:  req.Resource.Version,
+		Resource: req.Resource.Resource,
+	}
+
+	// Lookup validator by GVR; unknown resources are rejected rather
+	// than silently allowed.
+	validator, ok := validators[gvr]
+	if !ok {
+		return nil, fmt.Errorf("no validator registered for resource %s", gvr.String())
+	}
+
+	// Deserialize the object
+	obj, err := deserializeObject(req.Object.Raw)
+	if err != nil {
+		return nil, fmt.Errorf("failed to deserialize object: %w", err)
+	}
+
+	// Deserialize old object if present (for UPDATE operations)
+	var oldObj runtime.Object
+	if len(req.OldObject.Raw) > 0 {
+		oldObj, err = deserializeObject(req.OldObject.Raw)
+		if err != nil {
+			return nil, fmt.Errorf("failed to deserialize old object: %w", err)
+		}
+	}
+
+	var fieldErrs field.ErrorList
+	var warnings []string
+
+	// Perform validation based on operation
+	switch req.Operation {
+	case admissionv1.Create:
+		fieldErrs = validator.ValidateCreate(obj)
+		warnings = validator.GetWarningsOnCreate(obj)
+
+	case admissionv1.Update:
+		// ROBUSTNESS: the API server always sends oldObject on UPDATE,
+		// but guard anyway so a malformed request cannot make typed
+		// validators dereference a nil old object.
+		if oldObj == nil {
+			return nil, fmt.Errorf("update request for resource %s is missing oldObject", gvr.String())
+		}
+		fieldErrs = validator.ValidateUpdate(obj, oldObj)
+		warnings = validator.GetWarningsOnUpdate(obj, oldObj)
+
+	default:
+		// For other operations (DELETE, CONNECT), allow by default
+		return nil, nil
+	}
+
+	// If there are validation errors, return an aggregate error
+	if len(fieldErrs) > 0 {
+		groupKind := validator.GetGroupKind(obj)
+		name := validator.GetName(obj)
+		return warnings, apierrors.NewInvalid(groupKind, name, fieldErrs)
+	}
+
+	return warnings, nil
+}
+
+// deserializeObject decodes raw admission payload bytes into a typed
+// runtime.Object using the package's registered codec factory.
+func deserializeObject(raw []byte) (runtime.Object, error) {
+	if len(raw) == 0 {
+		return nil, fmt.Errorf("empty raw object")
+	}
+
+	obj, _, err := codecs.UniversalDeserializer().Decode(raw, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	return obj, nil
+}
diff --git a/pkg/splunk/enterprise/validation/validate_test.go b/pkg/splunk/enterprise/validation/validate_test.go
new file mode 100644
index 000000000..eeffaa452
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/validate_test.go
@@ -0,0 +1,315 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "encoding/json"
+ "testing"
+
+ admissionv1 "k8s.io/api/admission/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+)
+
+func createStandaloneJSON(name, namespace string, replicas int32) []byte {
+ standalone := &enterpriseApi.Standalone{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "enterprise.splunk.com/v4",
+ Kind: "Standalone",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ Spec: enterpriseApi.StandaloneSpec{
+ Replicas: replicas,
+ },
+ }
+ data, _ := json.Marshal(standalone)
+ return data
+}
+
+func createTestValidators() map[schema.GroupVersionResource]Validator {
+ return map[schema.GroupVersionResource]Validator{
+ StandaloneGVR: &GenericValidator[*enterpriseApi.Standalone]{
+ ValidateCreateFunc: func(obj *enterpriseApi.Standalone) field.ErrorList {
+ var errs field.ErrorList
+ if obj.Spec.Replicas < 0 {
+ errs = append(errs, field.Invalid(
+ field.NewPath("spec").Child("replicas"),
+ obj.Spec.Replicas,
+ "must be non-negative"))
+ }
+ return errs
+ },
+ ValidateUpdateFunc: func(obj, oldObj *enterpriseApi.Standalone) field.ErrorList {
+ var errs field.ErrorList
+ if obj.Spec.Replicas < 0 {
+ errs = append(errs, field.Invalid(
+ field.NewPath("spec").Child("replicas"),
+ obj.Spec.Replicas,
+ "must be non-negative"))
+ }
+ return errs
+ },
+ WarningsOnCreateFunc: func(obj *enterpriseApi.Standalone) []string {
+ if obj.Spec.Replicas > 10 {
+ return []string{"high replica count may impact performance"}
+ }
+ return nil
+ },
+ WarningsOnUpdateFunc: func(obj, oldObj *enterpriseApi.Standalone) []string {
+ return nil
+ },
+ GroupKind: schema.GroupKind{Group: "enterprise.splunk.com", Kind: "Standalone"},
+ },
+ }
+}
+
// TestValidate exercises the admission-review entry point end to end with a
// single registered Standalone validator. Cases cover: nil review and nil
// request, a clean CREATE, a CREATE rejected by field validation, a CREATE
// that only produces warnings, a clean UPDATE, DELETE (allowed by default),
// an unregistered resource, and two deserialization failures.
func TestValidate(t *testing.T) {
	validators := createTestValidators()

	tests := []struct {
		name         string
		ar           *admissionv1.AdmissionReview
		wantErr      bool
		wantWarnings int
	}{
		{
			name:    "nil admission review",
			ar:      nil,
			wantErr: true,
		},
		{
			name: "nil request",
			ar: &admissionv1.AdmissionReview{
				Request: nil,
			},
			wantErr: true,
		},
		{
			// Replicas=1 passes the non-negative check and stays under the
			// warning threshold of 10.
			name: "valid CREATE operation",
			ar: &admissionv1.AdmissionReview{
				Request: &admissionv1.AdmissionRequest{
					UID:       "test-uid",
					Operation: admissionv1.Create,
					Resource: metav1.GroupVersionResource{
						Group:    "enterprise.splunk.com",
						Version:  "v4",
						Resource: "standalones",
					},
					Object: runtime.RawExtension{
						Raw: createStandaloneJSON("test", "default", 1),
					},
				},
			},
			wantErr:      false,
			wantWarnings: 0,
		},
		{
			// Negative replicas trip ValidateCreateFunc, so Validate returns
			// an aggregate Invalid error.
			name: "invalid CREATE - negative replicas",
			ar: &admissionv1.AdmissionReview{
				Request: &admissionv1.AdmissionRequest{
					UID:       "test-uid",
					Operation: admissionv1.Create,
					Resource: metav1.GroupVersionResource{
						Group:    "enterprise.splunk.com",
						Version:  "v4",
						Resource: "standalones",
					},
					Object: runtime.RawExtension{
						Raw: createStandaloneJSON("test", "default", -1),
					},
				},
			},
			wantErr: true,
		},
		{
			// Replicas=15 is valid but exceeds the warning threshold (10),
			// so exactly one warning and no error are expected.
			name: "CREATE with warnings",
			ar: &admissionv1.AdmissionReview{
				Request: &admissionv1.AdmissionRequest{
					UID:       "test-uid",
					Operation: admissionv1.Create,
					Resource: metav1.GroupVersionResource{
						Group:    "enterprise.splunk.com",
						Version:  "v4",
						Resource: "standalones",
					},
					Object: runtime.RawExtension{
						Raw: createStandaloneJSON("test", "default", 15),
					},
				},
			},
			wantErr:      false,
			wantWarnings: 1,
		},
		{
			// UPDATE supplies both Object and OldObject so the update path
			// (including old-object deserialization) is exercised.
			name: "valid UPDATE operation",
			ar: &admissionv1.AdmissionReview{
				Request: &admissionv1.AdmissionRequest{
					UID:       "test-uid",
					Operation: admissionv1.Update,
					Resource: metav1.GroupVersionResource{
						Group:    "enterprise.splunk.com",
						Version:  "v4",
						Resource: "standalones",
					},
					Object: runtime.RawExtension{
						Raw: createStandaloneJSON("test", "default", 2),
					},
					OldObject: runtime.RawExtension{
						Raw: createStandaloneJSON("test", "default", 1),
					},
				},
			},
			wantErr: false,
		},
		{
			// Operations other than CREATE/UPDATE fall through the switch in
			// Validate and are allowed with no warnings.
			name: "DELETE operation - allowed by default",
			ar: &admissionv1.AdmissionReview{
				Request: &admissionv1.AdmissionRequest{
					UID:       "test-uid",
					Operation: admissionv1.Delete,
					Resource: metav1.GroupVersionResource{
						Group:    "enterprise.splunk.com",
						Version:  "v4",
						Resource: "standalones",
					},
					Object: runtime.RawExtension{
						Raw: createStandaloneJSON("test", "default", 1),
					},
				},
			},
			wantErr: false,
		},
		{
			// A GVR with no registered validator is rejected before any
			// deserialization happens.
			name: "unknown resource - no validator",
			ar: &admissionv1.AdmissionReview{
				Request: &admissionv1.AdmissionRequest{
					UID:       "test-uid",
					Operation: admissionv1.Create,
					Resource: metav1.GroupVersionResource{
						Group:    "enterprise.splunk.com",
						Version:  "v4",
						Resource: "unknownresources",
					},
					Object: runtime.RawExtension{
						Raw: []byte(`{}`),
					},
				},
			},
			wantErr: true,
		},
		{
			name: "invalid JSON - deserialization error",
			ar: &admissionv1.AdmissionReview{
				Request: &admissionv1.AdmissionRequest{
					UID:       "test-uid",
					Operation: admissionv1.Create,
					Resource: metav1.GroupVersionResource{
						Group:    "enterprise.splunk.com",
						Version:  "v4",
						Resource: "standalones",
					},
					Object: runtime.RawExtension{
						Raw: []byte(`{invalid json`),
					},
				},
			},
			wantErr: true,
		},
		{
			name: "empty object - deserialization error",
			ar: &admissionv1.AdmissionReview{
				Request: &admissionv1.AdmissionRequest{
					UID:       "test-uid",
					Operation: admissionv1.Create,
					Resource: metav1.GroupVersionResource{
						Group:    "enterprise.splunk.com",
						Version:  "v4",
						Resource: "standalones",
					},
					Object: runtime.RawExtension{
						Raw: []byte{},
					},
				},
			},
			wantErr: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			warnings, err := Validate(tt.ar, validators)

			// Only the presence of an error is asserted here; the concrete
			// error contents are covered by the validator unit tests.
			if (err != nil) != tt.wantErr {
				t.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr)
			}

			if len(warnings) != tt.wantWarnings {
				t.Errorf("Validate() warnings = %d, want %d", len(warnings), tt.wantWarnings)
			}
		})
	}
}
+
+func TestDeserializeObject(t *testing.T) {
+ tests := []struct {
+ name string
+ raw []byte
+ wantErr bool
+ }{
+ {
+ name: "valid standalone JSON",
+ raw: createStandaloneJSON("test", "default", 1),
+ wantErr: false,
+ },
+ {
+ name: "empty bytes",
+ raw: []byte{},
+ wantErr: true,
+ },
+ {
+ name: "nil bytes",
+ raw: nil,
+ wantErr: true,
+ },
+ {
+ name: "invalid JSON",
+ raw: []byte(`{not valid json`),
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ obj, err := deserializeObject(tt.raw)
+
+ if (err != nil) != tt.wantErr {
+ t.Errorf("deserializeObject() error = %v, wantErr %v", err, tt.wantErr)
+ }
+
+ if !tt.wantErr && obj == nil {
+ t.Error("deserializeObject() returned nil object without error")
+ }
+ })
+ }
+}
diff --git a/pkg/splunk/enterprise/validation/validator.go b/pkg/splunk/enterprise/validation/validator.go
new file mode 100644
index 000000000..3379602a8
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/validator.go
@@ -0,0 +1,164 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
// ValidatableObject is the interface that CRD types must implement to be
// validated by this package. It combines runtime.Object (deep-copy support)
// with the minimal metadata accessors the validators rely on.
type ValidatableObject interface {
	runtime.Object
	// GetName returns the object's metadata.name.
	GetName() string
	// GetNamespace returns the object's metadata.namespace.
	GetNamespace() string
	// GetObjectKind exposes the object's group/version/kind information.
	GetObjectKind() schema.ObjectKind
}
+
// Validator is the type-erased interface the admission handler uses to
// validate Kubernetes objects without knowing their concrete CRD type.
// Implementations are looked up by GroupVersionResource.
type Validator interface {
	// ValidateCreate validates an object on CREATE operation.
	ValidateCreate(obj runtime.Object) field.ErrorList

	// ValidateUpdate validates an object on UPDATE operation, given both the
	// proposed object and the previously stored one.
	ValidateUpdate(obj, oldObj runtime.Object) field.ErrorList

	// GetGroupKind returns the GroupKind for the object; used to build
	// Invalid API errors.
	GetGroupKind(obj runtime.Object) schema.GroupKind

	// GetName returns the name of the object.
	GetName(obj runtime.Object) string

	// GetWarningsOnCreate returns non-fatal warnings for CREATE operation.
	GetWarningsOnCreate(obj runtime.Object) []string

	// GetWarningsOnUpdate returns non-fatal warnings for UPDATE operation.
	GetWarningsOnUpdate(obj, oldObj runtime.Object) []string
}
+
// GenericValidator is a type-safe adapter that lifts strongly-typed
// validation callbacks for one CRD type T into the type-erased Validator
// interface. Any nil callback means "no checks for that operation".
type GenericValidator[T ValidatableObject] struct {
	// ValidateCreateFunc is the function to validate on CREATE; nil disables
	// CREATE validation.
	ValidateCreateFunc func(obj T) field.ErrorList

	// ValidateUpdateFunc is the function to validate on UPDATE; nil disables
	// UPDATE validation.
	ValidateUpdateFunc func(obj, oldObj T) field.ErrorList

	// WarningsOnCreateFunc returns warnings on CREATE; nil means no warnings.
	WarningsOnCreateFunc func(obj T) []string

	// WarningsOnUpdateFunc returns warnings on UPDATE; nil means no warnings.
	WarningsOnUpdateFunc func(obj, oldObj T) []string

	// GroupKind identifies the CRD this validator handles; reported through
	// GetGroupKind when building Invalid errors.
	GroupKind schema.GroupKind
}
+
+// ValidateCreate implements Validator interface
+func (v *GenericValidator[T]) ValidateCreate(obj runtime.Object) field.ErrorList {
+ if v.ValidateCreateFunc == nil {
+ return nil
+ }
+ typedObj, ok := obj.(T)
+ if !ok {
+ return field.ErrorList{field.InternalError(nil,
+ &TypeAssertionError{Expected: new(T), Actual: obj})}
+ }
+ return v.ValidateCreateFunc(typedObj)
+}
+
+// ValidateUpdate implements Validator interface
+func (v *GenericValidator[T]) ValidateUpdate(obj, oldObj runtime.Object) field.ErrorList {
+ if v.ValidateUpdateFunc == nil {
+ return nil
+ }
+ typedObj, ok := obj.(T)
+ if !ok {
+ return field.ErrorList{field.InternalError(nil,
+ &TypeAssertionError{Expected: new(T), Actual: obj})}
+ }
+ typedOldObj, ok := oldObj.(T)
+ if !ok {
+ return field.ErrorList{field.InternalError(nil,
+ &TypeAssertionError{Expected: new(T), Actual: oldObj})}
+ }
+ return v.ValidateUpdateFunc(typedObj, typedOldObj)
+}
+
+// GetGroupKind implements Validator interface
+func (v *GenericValidator[T]) GetGroupKind(obj runtime.Object) schema.GroupKind {
+ return v.GroupKind
+}
+
+// GetName implements Validator interface
+func (v *GenericValidator[T]) GetName(obj runtime.Object) string {
+ typedObj, ok := obj.(T)
+ if !ok {
+ return ""
+ }
+ return typedObj.GetName()
+}
+
+// GetWarningsOnCreate implements Validator interface
+func (v *GenericValidator[T]) GetWarningsOnCreate(obj runtime.Object) []string {
+ if v.WarningsOnCreateFunc == nil {
+ return nil
+ }
+ typedObj, ok := obj.(T)
+ if !ok {
+ return nil
+ }
+ return v.WarningsOnCreateFunc(typedObj)
+}
+
+// GetWarningsOnUpdate implements Validator interface
+func (v *GenericValidator[T]) GetWarningsOnUpdate(obj, oldObj runtime.Object) []string {
+ if v.WarningsOnUpdateFunc == nil {
+ return nil
+ }
+ typedObj, ok := obj.(T)
+ if !ok {
+ return nil
+ }
+ typedOldObj, ok := oldObj.(T)
+ if !ok {
+ return nil
+ }
+ return v.WarningsOnUpdateFunc(typedObj, typedOldObj)
+}
+
+// Ensure GenericValidator implements Validator
+var _ Validator = &GenericValidator[*dummyObject]{}
+
+// dummyObject is used only for compile-time interface check
+type dummyObject struct{}
+
+func (d *dummyObject) GetName() string { return "" }
+func (d *dummyObject) GetNamespace() string { return "" }
+func (d *dummyObject) GetObjectKind() schema.ObjectKind { return nil }
+func (d *dummyObject) DeepCopyObject() runtime.Object { return d }
+
// TypeAssertionError records a failed conversion from runtime.Object to a
// validator's concrete type. Expected and Actual are kept for debugger
// inspection; the message is intentionally a fixed string because existing
// tests assert on it verbatim.
type TypeAssertionError struct {
	Expected interface{}
	Actual   interface{}
}

// Error implements the error interface with a stable message.
func (e *TypeAssertionError) Error() string {
	return "type assertion failed"
}
diff --git a/pkg/splunk/enterprise/validation/validator_test.go b/pkg/splunk/enterprise/validation/validator_test.go
new file mode 100644
index 000000000..d4ad55e90
--- /dev/null
+++ b/pkg/splunk/enterprise/validation/validator_test.go
@@ -0,0 +1,347 @@
+/*
+Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "testing"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+)
+
// TestGenericValidatorValidateCreate covers the CREATE path of
// GenericValidator: a passing callback, a failing callback, a nil callback
// (no-op), and a wrong-typed object (internal error).
func TestGenericValidatorValidateCreate(t *testing.T) {
	tests := []struct {
		name            string
		validator       *GenericValidator[*enterpriseApi.Standalone]
		obj             runtime.Object
		wantErrCount    int
		wantInternalErr bool
	}{
		{
			name: "valid standalone - no errors",
			validator: &GenericValidator[*enterpriseApi.Standalone]{
				ValidateCreateFunc: func(obj *enterpriseApi.Standalone) field.ErrorList {
					return nil
				},
				GroupKind: schema.GroupKind{Group: "enterprise.splunk.com", Kind: "Standalone"},
			},
			obj: &enterpriseApi.Standalone{
				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
				Spec:       enterpriseApi.StandaloneSpec{Replicas: 1},
			},
			wantErrCount: 0,
		},
		{
			name: "invalid standalone - validation error",
			validator: &GenericValidator[*enterpriseApi.Standalone]{
				ValidateCreateFunc: func(obj *enterpriseApi.Standalone) field.ErrorList {
					return field.ErrorList{
						field.Invalid(field.NewPath("spec").Child("replicas"), obj.Spec.Replicas, "must be positive"),
					}
				},
				GroupKind: schema.GroupKind{Group: "enterprise.splunk.com", Kind: "Standalone"},
			},
			obj: &enterpriseApi.Standalone{
				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
				Spec:       enterpriseApi.StandaloneSpec{Replicas: -1},
			},
			wantErrCount: 1,
		},
		{
			// A nil ValidateCreateFunc disables CREATE validation entirely.
			name: "nil ValidateCreateFunc - returns nil",
			validator: &GenericValidator[*enterpriseApi.Standalone]{
				ValidateCreateFunc: nil,
				GroupKind:          schema.GroupKind{Group: "enterprise.splunk.com", Kind: "Standalone"},
			},
			obj: &enterpriseApi.Standalone{
				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
			},
			wantErrCount: 0,
		},
		{
			// Passing an IndexerCluster to a Standalone validator must yield
			// exactly one ErrorTypeInternal error, not a panic.
			name: "wrong type - internal error",
			validator: &GenericValidator[*enterpriseApi.Standalone]{
				ValidateCreateFunc: func(obj *enterpriseApi.Standalone) field.ErrorList {
					return nil
				},
				GroupKind: schema.GroupKind{Group: "enterprise.splunk.com", Kind: "Standalone"},
			},
			obj: &enterpriseApi.IndexerCluster{
				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
			},
			wantErrCount:    1,
			wantInternalErr: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			errs := tt.validator.ValidateCreate(tt.obj)
			if len(errs) != tt.wantErrCount {
				t.Errorf("ValidateCreate() got %d errors, want %d", len(errs), tt.wantErrCount)
			}
			if tt.wantInternalErr && len(errs) > 0 {
				if errs[0].Type != field.ErrorTypeInternal {
					t.Errorf("ValidateCreate() expected internal error, got %v", errs[0].Type)
				}
			}
		})
	}
}
+
// TestGenericValidatorValidateUpdate covers the UPDATE path of
// GenericValidator: a passing callback, a callback that rejects a changed
// immutable field, and a nil callback (no-op).
func TestGenericValidatorValidateUpdate(t *testing.T) {
	tests := []struct {
		name         string
		validator    *GenericValidator[*enterpriseApi.Standalone]
		obj          runtime.Object
		oldObj       runtime.Object
		wantErrCount int
	}{
		{
			name: "valid update - no errors",
			validator: &GenericValidator[*enterpriseApi.Standalone]{
				ValidateUpdateFunc: func(obj, oldObj *enterpriseApi.Standalone) field.ErrorList {
					return nil
				},
				GroupKind: schema.GroupKind{Group: "enterprise.splunk.com", Kind: "Standalone"},
			},
			obj: &enterpriseApi.Standalone{
				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
				Spec:       enterpriseApi.StandaloneSpec{Replicas: 2},
			},
			oldObj: &enterpriseApi.Standalone{
				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
				Spec:       enterpriseApi.StandaloneSpec{Replicas: 1},
			},
			wantErrCount: 0,
		},
		{
			// The callback compares new vs old, demonstrating that both
			// objects are delivered to ValidateUpdateFunc.
			name: "invalid update - immutable field changed",
			validator: &GenericValidator[*enterpriseApi.Standalone]{
				ValidateUpdateFunc: func(obj, oldObj *enterpriseApi.Standalone) field.ErrorList {
					if obj.Name != oldObj.Name {
						return field.ErrorList{
							field.Forbidden(field.NewPath("metadata").Child("name"), "field is immutable"),
						}
					}
					return nil
				},
				GroupKind: schema.GroupKind{Group: "enterprise.splunk.com", Kind: "Standalone"},
			},
			obj: &enterpriseApi.Standalone{
				ObjectMeta: metav1.ObjectMeta{Name: "test-new", Namespace: "default"},
			},
			oldObj: &enterpriseApi.Standalone{
				ObjectMeta: metav1.ObjectMeta{Name: "test-old", Namespace: "default"},
			},
			wantErrCount: 1,
		},
		{
			// A nil ValidateUpdateFunc disables UPDATE validation entirely.
			name: "nil ValidateUpdateFunc - returns nil",
			validator: &GenericValidator[*enterpriseApi.Standalone]{
				ValidateUpdateFunc: nil,
				GroupKind:          schema.GroupKind{Group: "enterprise.splunk.com", Kind: "Standalone"},
			},
			obj: &enterpriseApi.Standalone{
				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
			},
			oldObj: &enterpriseApi.Standalone{
				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
			},
			wantErrCount: 0,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			errs := tt.validator.ValidateUpdate(tt.obj, tt.oldObj)
			if len(errs) != tt.wantErrCount {
				t.Errorf("ValidateUpdate() got %d errors, want %d", len(errs), tt.wantErrCount)
			}
		})
	}
}
+
+func TestGenericValidatorGetGroupKind(t *testing.T) {
+ validator := &GenericValidator[*enterpriseApi.Standalone]{
+ GroupKind: schema.GroupKind{Group: "enterprise.splunk.com", Kind: "Standalone"},
+ }
+
+ obj := &enterpriseApi.Standalone{
+ ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
+ }
+
+ gk := validator.GetGroupKind(obj)
+ if gk.Group != "enterprise.splunk.com" {
+ t.Errorf("GetGroupKind() group = %s, want enterprise.splunk.com", gk.Group)
+ }
+ if gk.Kind != "Standalone" {
+ t.Errorf("GetGroupKind() kind = %s, want Standalone", gk.Kind)
+ }
+}
+
+func TestGenericValidatorGetName(t *testing.T) {
+ validator := &GenericValidator[*enterpriseApi.Standalone]{
+ GroupKind: schema.GroupKind{Group: "enterprise.splunk.com", Kind: "Standalone"},
+ }
+
+ tests := []struct {
+ name string
+ obj runtime.Object
+ wantName string
+ }{
+ {
+ name: "valid object",
+ obj: &enterpriseApi.Standalone{
+ ObjectMeta: metav1.ObjectMeta{Name: "my-standalone", Namespace: "default"},
+ },
+ wantName: "my-standalone",
+ },
+ {
+ name: "wrong type - returns empty",
+ obj: &enterpriseApi.IndexerCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "my-indexer", Namespace: "default"},
+ },
+ wantName: "",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ name := validator.GetName(tt.obj)
+ if name != tt.wantName {
+ t.Errorf("GetName() = %s, want %s", name, tt.wantName)
+ }
+ })
+ }
+}
+
// TestGenericValidatorGetWarningsOnCreate covers the CREATE warnings path:
// a callback that returns warnings, a nil callback, and a wrong-typed
// object (which silently yields no warnings rather than an error).
func TestGenericValidatorGetWarningsOnCreate(t *testing.T) {
	tests := []struct {
		name         string
		validator    *GenericValidator[*enterpriseApi.Standalone]
		obj          runtime.Object
		wantWarnings int
	}{
		{
			name: "returns warnings",
			validator: &GenericValidator[*enterpriseApi.Standalone]{
				WarningsOnCreateFunc: func(obj *enterpriseApi.Standalone) []string {
					return []string{"warning1", "warning2"}
				},
			},
			obj: &enterpriseApi.Standalone{
				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
			},
			wantWarnings: 2,
		},
		{
			name: "nil func - returns nil",
			validator: &GenericValidator[*enterpriseApi.Standalone]{
				WarningsOnCreateFunc: nil,
			},
			obj: &enterpriseApi.Standalone{
				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
			},
			wantWarnings: 0,
		},
		{
			// Warnings are best-effort: a type mismatch is not an error and
			// the configured callback is never invoked.
			name: "wrong type - returns nil",
			validator: &GenericValidator[*enterpriseApi.Standalone]{
				WarningsOnCreateFunc: func(obj *enterpriseApi.Standalone) []string {
					return []string{"warning"}
				},
			},
			obj: &enterpriseApi.IndexerCluster{
				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
			},
			wantWarnings: 0,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			warnings := tt.validator.GetWarningsOnCreate(tt.obj)
			if len(warnings) != tt.wantWarnings {
				t.Errorf("GetWarningsOnCreate() got %d warnings, want %d", len(warnings), tt.wantWarnings)
			}
		})
	}
}
+
// TestGenericValidatorGetWarningsOnUpdate covers the UPDATE warnings path:
// a callback that returns warnings and a nil callback (no warnings).
func TestGenericValidatorGetWarningsOnUpdate(t *testing.T) {
	tests := []struct {
		name         string
		validator    *GenericValidator[*enterpriseApi.Standalone]
		obj          runtime.Object
		oldObj       runtime.Object
		wantWarnings int
	}{
		{
			name: "returns warnings",
			validator: &GenericValidator[*enterpriseApi.Standalone]{
				WarningsOnUpdateFunc: func(obj, oldObj *enterpriseApi.Standalone) []string {
					return []string{"update warning"}
				},
			},
			obj: &enterpriseApi.Standalone{
				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
			},
			oldObj: &enterpriseApi.Standalone{
				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
			},
			wantWarnings: 1,
		},
		{
			name: "nil func - returns nil",
			validator: &GenericValidator[*enterpriseApi.Standalone]{
				WarningsOnUpdateFunc: nil,
			},
			obj: &enterpriseApi.Standalone{
				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
			},
			oldObj: &enterpriseApi.Standalone{
				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
			},
			wantWarnings: 0,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			warnings := tt.validator.GetWarningsOnUpdate(tt.obj, tt.oldObj)
			if len(warnings) != tt.wantWarnings {
				t.Errorf("GetWarningsOnUpdate() got %d warnings, want %d", len(warnings), tt.wantWarnings)
			}
		})
	}
}
+
+func TestTypeAssertionError(t *testing.T) {
+ err := &TypeAssertionError{
+ Expected: &enterpriseApi.Standalone{},
+ Actual: &enterpriseApi.IndexerCluster{},
+ }
+
+ if err.Error() != "type assertion failed" {
+ t.Errorf("TypeAssertionError.Error() = %s, want 'type assertion failed'", err.Error())
+ }
+}
diff --git a/pkg/splunk/splkcontroller/configmap.go b/pkg/splunk/splkcontroller/configmap.go
index 13de3f7ba..2d27a4adb 100644
--- a/pkg/splunk/splkcontroller/configmap.go
+++ b/pkg/splunk/splkcontroller/configmap.go
@@ -43,7 +43,7 @@ func ApplyConfigMap(ctx context.Context, client splcommon.ControllerClient, conf
err := client.Get(context.TODO(), namespacedName, ¤t)
- // dataUpdated flag returns if the data on the configMap has been succesfully updated
+ // dataUpdated flag returns if the data on the configMap has been successfully updated
dataUpdated := false
if err == nil {
// updateNeeded flag indicates if an update to the configMap is necessary
diff --git a/pkg/splunk/splkcontroller/controller.go b/pkg/splunk/splkcontroller/controller.go
index efbd33608..34389d7ab 100644
--- a/pkg/splunk/splkcontroller/controller.go
+++ b/pkg/splunk/splkcontroller/controller.go
@@ -17,6 +17,7 @@ package splkcontroller
import (
"context"
+
splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
"k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
diff --git a/pkg/splunk/splkcontroller/controller_test.go b/pkg/splunk/splkcontroller/controller_test.go
index d0f6acfaf..431473d49 100644
--- a/pkg/splunk/splkcontroller/controller_test.go
+++ b/pkg/splunk/splkcontroller/controller_test.go
@@ -18,11 +18,12 @@ package splkcontroller
import (
"context"
"errors"
- "k8s.io/client-go/kubernetes/scheme"
"net/http"
+ "testing"
+
+ "k8s.io/client-go/kubernetes/scheme"
ctrl2 "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/config"
- "testing"
"github.com/go-logr/logr"
splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
diff --git a/pkg/splunk/splkcontroller/doc.go b/pkg/splunk/splkcontroller/doc.go
index 31b1420db..5336be89f 100644
--- a/pkg/splunk/splkcontroller/doc.go
+++ b/pkg/splunk/splkcontroller/doc.go
@@ -15,7 +15,7 @@
/*
Package controller is used to manipulate Kubernetes resources using its REST API.
-This package has no depedencies outside of the standard go and kubernetes libraries,
+This package has no dependencies outside of the standard go and kubernetes libraries,
and the splunk.common package.
*/
package splkcontroller
diff --git a/pkg/splunk/test/controller.go b/pkg/splunk/test/controller.go
index 6e5871cc4..6d43fa149 100644
--- a/pkg/splunk/test/controller.go
+++ b/pkg/splunk/test/controller.go
@@ -349,11 +349,32 @@ func (c MockClient) Get(ctx context.Context, key client.ObjectKey, obj client.Ob
return nil
}
+ gvk := obj.GetObjectKind().GroupVersionKind()
+ if gvk.Empty() {
+ // Infer GVK from object type
+ typeName := reflect.TypeOf(obj).Elem().Name()
+ // Determine group based on type
+ var group, version string
+ switch obj.(type) {
+ case *corev1.Pod, *corev1.Service, *corev1.ConfigMap, *corev1.Secret:
+ group, version = "", "v1"
+ case *appsv1.StatefulSet, *appsv1.Deployment:
+ group, version = "apps", "v1"
+ default:
+ group, version = "enterprise.splunk.com", "v4"
+ }
+ gvk = schema.GroupVersionKind{
+ Group: group,
+ Version: version,
+ Kind: typeName,
+ }
+ }
+
dummySchemaResource := schema.GroupResource{
- Group: obj.GetObjectKind().GroupVersionKind().Group,
- Resource: obj.GetObjectKind().GroupVersionKind().Kind,
+ Group: gvk.Group,
+ Resource: gvk.Kind,
}
- c.NotFoundError = k8serrors.NewNotFound(dummySchemaResource, obj.GetName())
+ c.NotFoundError = k8serrors.NewNotFound(dummySchemaResource, key.Name)
return c.NotFoundError
}
@@ -368,13 +389,54 @@ func (c MockClient) List(ctx context.Context, obj client.ObjectList, opts ...cli
ListOpts: opts,
ObjList: obj,
})
- listObj := c.ListObj
- if listObj != nil {
- srcObj := listObj
- copyMockObjectList(&obj, &srcObj)
- return nil
+
+ // Only handle PodList for this test
+ podList, ok := obj.(*corev1.PodList)
+ if !ok {
+ // fallback to old logic
+ listObj := c.ListObj
+ if listObj != nil {
+ srcObj := listObj
+ copyMockObjectList(&obj, &srcObj)
+ return nil
+ }
+ return c.NotFoundError
+ }
+
+ // Gather label selector and namespace from opts
+ var ns string
+ var matchLabels map[string]string
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case client.InNamespace:
+ ns = string(v)
+ case client.MatchingLabels:
+ matchLabels = v
+ }
}
- return c.NotFoundError
+
+ // Filter pods in State
+ for _, v := range c.State {
+ pod, ok := v.(*corev1.Pod)
+ if !ok {
+ continue
+ }
+ if ns != "" && pod.Namespace != ns {
+ continue
+ }
+ matches := true
+ for k, val := range matchLabels {
+ if pod.Labels[k] != val {
+ matches = false
+ break
+ }
+ }
+ if matches {
+ podList.Items = append(podList.Items, *pod)
+ }
+ }
+
+ return nil
}
// Create returns mock client's Err field
diff --git a/pkg/splunk/util/messages.go b/pkg/splunk/util/messages.go
index bd33b0dd8..f7b54989e 100644
--- a/pkg/splunk/util/messages.go
+++ b/pkg/splunk/util/messages.go
@@ -52,8 +52,4 @@ const (
// emptySecretVolumeSource indicates an empty
emptySecretVolumeSource = "didn't find secret volume source in any pod volume"
-
- // splunkSSHWarningMessage Note: splunk 9.0 throws warning message "warning: server certificate hostname validation is disabled. please see server.conf/[sslconfig]/cliverifyservername for details.\n"
- // we are supressing the message
- splunkSSHWarningMessage = "WARNING: Server Certificate Hostname Validation is disabled. Please see server.conf/[sslConfig]/cliVerifyServerName for details.\n"
)
diff --git a/pkg/splunk/util/secrets.go b/pkg/splunk/util/secrets.go
index e2caa1842..2be12f503 100644
--- a/pkg/splunk/util/secrets.go
+++ b/pkg/splunk/util/secrets.go
@@ -36,7 +36,7 @@ import (
)
// GetSpecificSecretTokenFromPod retrieves a specific secret token's value from a Pod
-func GetSpecificSecretTokenFromPod(ctx context.Context, c splcommon.ControllerClient, PodName string, namespace string, secretToken string) (string, error) {
+var GetSpecificSecretTokenFromPod = func(ctx context.Context, c splcommon.ControllerClient, PodName string, namespace string, secretToken string) (string, error) {
logger := logging.FromContext(ctx).With("func", "GetSpecificSecretTokenFromPod")
logger.DebugContext(ctx, "Retrieving secret token from pod",
"pod", PodName,
diff --git a/pkg/splunk/util/util.go b/pkg/splunk/util/util.go
index a393d7703..57a6442b0 100644
--- a/pkg/splunk/util/util.go
+++ b/pkg/splunk/util/util.go
@@ -228,8 +228,8 @@ type PodExecClient struct {
targetPodName string
}
-// GetPodExecClient returns the client object used to execute pod exec commands
-func GetPodExecClient(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) *PodExecClient {
+// getPodExecClientImpl is the actual implementation that creates a real PodExecClient
+func getPodExecClientImpl(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) PodExecClientImpl {
return &PodExecClient{
client: client,
cr: cr,
@@ -238,6 +238,10 @@ func GetPodExecClient(client splcommon.ControllerClient, cr splcommon.MetaObject
}
}
+// GetPodExecClient is a var that can be mocked in tests to return a mock PodExecClient
+// By default it returns a real PodExecClient
+var GetPodExecClient = getPodExecClientImpl
+
// suppressHarmlessErrorMessages suppresses harmless error messages
func suppressHarmlessErrorMessages(values ...*string) {
for _, val := range values {
diff --git a/pkg/splunk/util/util_test.go b/pkg/splunk/util/util_test.go
index 5e61f1676..df414f245 100644
--- a/pkg/splunk/util/util_test.go
+++ b/pkg/splunk/util/util_test.go
@@ -52,9 +52,7 @@ var fakePodExecRESTClientForGVK = func(gvk schema.GroupVersionKind, isUnstructur
return &fakeRestInterface{}, errors.New("fakeerror")
}
-type fakeRestInterface struct {
- name string
-}
+type fakeRestInterface struct{}
func (fri fakeRestInterface) GetRateLimiter() flowcontrol.RateLimiter {
return flowcontrol.NewFakeAlwaysRateLimiter()
@@ -214,6 +212,18 @@ func TestDeepCopy(t *testing.T) {
func TestPodExecCommand(t *testing.T) {
ctx := context.TODO()
+
+ // Mock podExecGetConfig to return a config with localhost as server
+ // This prevents the test from trying to connect to a real Kubernetes cluster
+ // and timing out. Instead, it will fail fast with connection refused.
+ savedPodExecGetConfig := podExecGetConfig
+ defer func() { podExecGetConfig = savedPodExecGetConfig }()
+ podExecGetConfig = func() (*rest.Config, error) {
+ return &rest.Config{
+ Host: "http://127.0.0.1:1", // Use invalid port for fast failure
+ }, nil
+ }
+
// Create pod
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
diff --git a/test/appframework_aws/c3/appframework_aws_test.go b/test/appframework_aws/c3/appframework_aws_test.go
index cd241e2eb..2d150f5ac 100644
--- a/test/appframework_aws/c3/appframework_aws_test.go
+++ b/test/appframework_aws/c3/appframework_aws_test.go
@@ -3182,7 +3182,7 @@ var _ = Describe("c3appfw test", func() {
// Deploy the Indexer Cluster
testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster")
indexerReplicas := 3
- _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "")
+ _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "")
Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster")
// Deploy the Search Head Cluster
diff --git a/test/appframework_aws/c3/manager_appframework_test.go b/test/appframework_aws/c3/manager_appframework_test.go
index 826c48d93..ba0b3e8ea 100644
--- a/test/appframework_aws/c3/manager_appframework_test.go
+++ b/test/appframework_aws/c3/manager_appframework_test.go
@@ -355,7 +355,7 @@ var _ = Describe("c3appfw test", func() {
shcName := fmt.Sprintf("%s-shc", deployment.GetName())
idxName := fmt.Sprintf("%s-idxc", deployment.GetName())
shc, err := deployment.DeploySearchHeadCluster(ctx, shcName, cm.GetName(), lm.GetName(), "", mcName)
- idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "")
+ idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "")
// Wait for License Manager to be in READY phase
testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst)
@@ -3320,7 +3320,7 @@ var _ = Describe("c3appfw test", func() {
// Deploy the Indexer Cluster
testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster")
indexerReplicas := 3
- _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "")
+ _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "")
Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster")
// Deploy the Search Head Cluster
diff --git a/test/appframework_az/c3/appframework_azure_test.go b/test/appframework_az/c3/appframework_azure_test.go
index a79d4941a..c7fea6ff3 100644
--- a/test/appframework_az/c3/appframework_azure_test.go
+++ b/test/appframework_az/c3/appframework_azure_test.go
@@ -993,7 +993,7 @@ var _ = Describe("c3appfw test", func() {
// Deploy the Indexer Cluster
testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster")
indexerReplicas := 3
- _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "")
+ _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "")
Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster")
// Deploy the Search Head Cluster
diff --git a/test/appframework_az/c3/manager_appframework_azure_test.go b/test/appframework_az/c3/manager_appframework_azure_test.go
index 2422d3e85..4412efe43 100644
--- a/test/appframework_az/c3/manager_appframework_azure_test.go
+++ b/test/appframework_az/c3/manager_appframework_azure_test.go
@@ -991,7 +991,7 @@ var _ = Describe("c3appfw test", func() {
// Deploy the Indexer Cluster
testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster")
indexerReplicas := 3
- _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "")
+ _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "")
Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster")
// Deploy the Search Head Cluster
diff --git a/test/appframework_gcp/c3/manager_appframework_test.go b/test/appframework_gcp/c3/manager_appframework_test.go
index 02b7c81be..66c553e47 100644
--- a/test/appframework_gcp/c3/manager_appframework_test.go
+++ b/test/appframework_gcp/c3/manager_appframework_test.go
@@ -361,7 +361,7 @@ var _ = Describe("c3appfw test", func() {
shcName := fmt.Sprintf("%s-shc", deployment.GetName())
idxName := fmt.Sprintf("%s-idxc", deployment.GetName())
shc, err := deployment.DeploySearchHeadCluster(ctx, shcName, cm.GetName(), lm.GetName(), "", mcName)
- idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "")
+ idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "")
// Wait for License Manager to be in READY phase
testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst)
@@ -3327,7 +3327,7 @@ var _ = Describe("c3appfw test", func() {
// Deploy the Indexer Cluster
testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster")
indexerReplicas := 3
- _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "")
+ _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "")
Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster")
// Deploy the Search Head Cluster
diff --git a/test/custom_resource_crud/custom_resource_crud_c3_test.go b/test/custom_resource_crud/custom_resource_crud_c3_test.go
index 5ec5f4f12..5d377d8dc 100644
--- a/test/custom_resource_crud/custom_resource_crud_c3_test.go
+++ b/test/custom_resource_crud/custom_resource_crud_c3_test.go
@@ -69,6 +69,7 @@ var _ = Describe("Crcrud test for SVA C3", func() {
// Deploy Single site Cluster and Search Head Clusters
mcRef := deployment.GetName()
+ prevTelemetrySubmissionTime := testenv.GetTelemetryLastSubmissionTime(ctx, deployment)
err := deployment.DeploySingleSiteCluster(ctx, deployment.GetName(), 3, true /*shc*/, mcRef)
Expect(err).To(Succeed(), "Unable to deploy cluster")
@@ -81,6 +82,10 @@ var _ = Describe("Crcrud test for SVA C3", func() {
// Ensure Indexers go to Ready phase
testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst)
+ // Verify telemetry
+ testenv.TriggerTelemetrySubmission(ctx, deployment)
+ testenv.VerifyTelemetry(ctx, deployment, prevTelemetrySubmissionTime)
+
// Deploy Monitoring Console CRD
mc, err := deployment.DeployMonitoringConsole(ctx, mcRef, "")
Expect(err).To(Succeed(), "Unable to deploy Monitoring Console One instance")
diff --git a/test/custom_resource_crud/custom_resource_crud_m4_test.go b/test/custom_resource_crud/custom_resource_crud_m4_test.go
index 3f5af549d..887530f94 100644
--- a/test/custom_resource_crud/custom_resource_crud_m4_test.go
+++ b/test/custom_resource_crud/custom_resource_crud_m4_test.go
@@ -65,6 +65,7 @@ var _ = Describe("Crcrud test for SVA M4", func() {
// Deploy Multisite Cluster and Search Head Clusters
mcRef := deployment.GetName()
+ prevTelemetrySubmissionTime := testenv.GetTelemetryLastSubmissionTime(ctx, deployment)
siteCount := 3
err := deployment.DeployMultisiteClusterMasterWithSearchHead(ctx, deployment.GetName(), 1, siteCount, mcRef)
Expect(err).To(Succeed(), "Unable to deploy cluster")
@@ -81,6 +82,10 @@ var _ = Describe("Crcrud test for SVA M4", func() {
// Ensure search head cluster go to Ready phase
testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst)
+ // Verify telemetry
+ testenv.TriggerTelemetrySubmission(ctx, deployment)
+ testenv.VerifyTelemetry(ctx, deployment, prevTelemetrySubmissionTime)
+
// Deploy Monitoring Console CRD
mc, err := deployment.DeployMonitoringConsole(ctx, mcRef, "")
Expect(err).To(Succeed(), "Unable to deploy Monitoring Console One instance")
diff --git a/test/custom_resource_crud/custom_resource_crud_s1_test.go b/test/custom_resource_crud/custom_resource_crud_s1_test.go
index 3747eeb4d..2b7f1e1e6 100644
--- a/test/custom_resource_crud/custom_resource_crud_s1_test.go
+++ b/test/custom_resource_crud/custom_resource_crud_s1_test.go
@@ -65,12 +65,17 @@ var _ = Describe("Crcrud test for SVA S1", func() {
// Deploy Standalone
mcRef := deployment.GetName()
+ prevTelemetrySubmissionTime := testenv.GetTelemetryLastSubmissionTime(ctx, deployment)
standalone, err := deployment.DeployStandalone(ctx, deployment.GetName(), mcRef, "")
Expect(err).To(Succeed(), "Unable to deploy standalone instance")
// Verify Standalone goes to ready state
testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst)
+ // Verify telemetry
+ testenv.TriggerTelemetrySubmission(ctx, deployment)
+ testenv.VerifyTelemetry(ctx, deployment, prevTelemetrySubmissionTime)
+
// Deploy Monitoring Console CRD
mc, err := deployment.DeployMonitoringConsole(ctx, deployment.GetName(), "")
Expect(err).To(Succeed(), "Unable to deploy Monitoring Console One instance")
diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go
new file mode 100644
index 000000000..d2c4be9f1
--- /dev/null
+++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go
@@ -0,0 +1,133 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package indingsep
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ "github.com/splunk/splunk-operator/test/testenv"
+)
+
+const (
+ // PollInterval specifies the polling interval
+ PollInterval = 5 * time.Second
+
+ // ConsistentPollInterval is the interval to use to consistently check a state is stable
+ ConsistentPollInterval = 200 * time.Millisecond
+ ConsistentDuration = 2000 * time.Millisecond
+)
+
+var (
+ testenvInstance *testenv.TestEnv
+ testSuiteName = "indingsep-" + testenv.RandomDNSName(3)
+
+ queue = enterpriseApi.QueueSpec{
+ Provider: "sqs",
+ SQS: enterpriseApi.SQSSpec{
+ Name: "index-ingest-separation-test-q",
+ AuthRegion: "us-west-2",
+ Endpoint: "https://sqs.us-west-2.amazonaws.com",
+ DLQ: "index-ingest-separation-test-dlq",
+ },
+ }
+ objectStorage = enterpriseApi.ObjectStorageSpec{
+ Provider: "s3",
+ S3: enterpriseApi.S3Spec{
+ Endpoint: "https://s3.us-west-2.amazonaws.com",
+ Path: "index-ingest-separation-test-bucket/smartbus-test",
+ },
+ }
+ serviceAccountName = "index-ingest-sa"
+
+ inputs = []string{
+ "[remote_queue:index-ingest-separation-test-q]",
+ "remote_queue.type = sqs_smartbus",
+ "remote_queue.sqs_smartbus.auth_region = us-west-2",
+ "remote_queue.sqs_smartbus.dead_letter_queue.name = index-ingest-separation-test-dlq",
+ "remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com",
+ "remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com",
+ "remote_queue.sqs_smartbus.large_message_store.path = s3://index-ingest-separation-test-bucket/smartbus-test",
+ "remote_queue.sqs_smartbus.retry_policy = max_count",
+ "remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4"}
+ outputs = append(inputs, "remote_queue.sqs_smartbus.encoding_format = s2s", "remote_queue.sqs_smartbus.send_interval = 5s")
+ defaultsAll = []string{
+ "[pipeline:remotequeueruleset]\ndisabled = false",
+ "[pipeline:ruleset]\ndisabled = true",
+ "[pipeline:remotequeuetyping]\ndisabled = false",
+ "[pipeline:remotequeueoutput]\ndisabled = false",
+ "[pipeline:typing]\ndisabled = true",
+ }
+ defaultsIngest = append(defaultsAll, "[pipeline:indexerPipe]\ndisabled = true")
+
+ awsEnvVars = []string{
+ "AWS_REGION=us-west-2",
+ "AWS_DEFAULT_REGION=us-west-2",
+ "AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token",
+ "AWS_ROLE_ARN=arn:aws:iam::",
+ "AWS_STS_REGIONAL_ENDPOINTS=regional",
+ }
+
+ inputsShouldNotContain = []string{
+ "[remote_queue:index-ingest-separation-test-q]",
+ "remote_queue.sqs_smartbus.dead_letter_queue.name = index-ingest-separation-test-dlq",
+ "remote_queue.sqs_smartbus.large_message_store.path = s3://index-ingest-separation-test-bucket/smartbus-test",
+ "remote_queue.sqs_smartbus.retry_policy = max_count",
+ "remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4"}
+ outputsShouldNotContain = append(inputs, "remote_queue.sqs_smartbus.send_interval = 5s")
+
+ testDataS3Bucket = os.Getenv("TEST_BUCKET")
+ testS3Bucket = os.Getenv("TEST_INDEXES_S3_BUCKET")
+ currDir, _ = os.Getwd()
+ downloadDirV1 = filepath.Join(currDir, "icappfwV1-"+testenv.RandomDNSName(4))
+ appSourceVolumeName = "appframework-test-volume-" + testenv.RandomDNSName(3)
+ s3TestDir = "icappfw-" + testenv.RandomDNSName(4)
+ appListV1 = testenv.BasicApps
+ s3AppDirV1 = testenv.AppLocationV1
+)
+
+// TestBasic is the main entry point
+func TestBasic(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ RunSpecs(t, "Running "+testSuiteName)
+}
+
+var _ = BeforeSuite(func() {
+ var err error
+ testenvInstance, err = testenv.NewDefaultTestEnv(testSuiteName)
+ Expect(err).ToNot(HaveOccurred())
+
+ appListV1 = testenv.BasicApps
+ appFileList := testenv.GetAppFileList(appListV1)
+
+ // Download V1 Apps from S3
+ err = testenv.DownloadFilesFromS3(testDataS3Bucket, s3AppDirV1, downloadDirV1, appFileList)
+ Expect(err).To(Succeed(), "Unable to download V1 app files")
+})
+
+var _ = AfterSuite(func() {
+ if testenvInstance != nil {
+ Expect(testenvInstance.Teardown()).ToNot(HaveOccurred())
+ }
+
+ err := os.RemoveAll(downloadDirV1)
+ Expect(err).To(Succeed(), "Unable to delete locally downloaded V1 app files")
+})
diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go
new file mode 100644
index 000000000..85c7de276
--- /dev/null
+++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go
@@ -0,0 +1,377 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package indingsep
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/onsi/ginkgo/types"
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ "github.com/splunk/splunk-operator/pkg/splunk/enterprise"
+
+ "github.com/splunk/splunk-operator/test/testenv"
+)
+
+var _ = Describe("indingsep test", func() {
+
+ var testcaseEnvInst *testenv.TestCaseEnv
+ var deployment *testenv.Deployment
+
+ var cmSpec enterpriseApi.ClusterManagerSpec
+
+ ctx := context.TODO()
+
+ BeforeEach(func() {
+ var err error
+
+ name := fmt.Sprintf("%s-%s", testenvInstance.GetName(), testenv.RandomDNSName(3))
+ testcaseEnvInst, err = testenv.NewDefaultTestCaseEnv(testenvInstance.GetKubeClient(), name)
+ Expect(err).To(Succeed(), "Unable to create testcaseenv")
+
+ deployment, err = testcaseEnvInst.NewDeployment(testenv.RandomDNSName(3))
+ Expect(err).To(Succeed(), "Unable to create deployment")
+
+ cmSpec = enterpriseApi.ClusterManagerSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ Spec: enterpriseApi.Spec{
+ ImagePullPolicy: "Always",
+ Image: testcaseEnvInst.GetSplunkImage(),
+ },
+ },
+ }
+ })
+
+ AfterEach(func() {
+ if types.SpecState(CurrentSpecReport().State) == types.SpecStateFailed {
+ testcaseEnvInst.SkipTeardown = true
+ }
+ if deployment != nil {
+ deployment.Teardown()
+ }
+
+ if testcaseEnvInst != nil {
+ Expect(testcaseEnvInst.Teardown()).ToNot(HaveOccurred())
+ }
+ })
+
+ Context("Ingestor and Indexer deployment", func() {
+ It("indingsep, smoke, indingsep: Splunk Operator can deploy Ingestors and Indexers", func() {
+ // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+
+ // Create Service Account
+ // testcaseEnvInst.Log.Info("Create Service Account")
+ // testcaseEnvInst.CreateServiceAccount(serviceAccountName)
+
+ // Secret reference
+ volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())}
+ queue.SQS.VolList = volumeSpec
+
+ // Deploy Queue
+ testcaseEnvInst.Log.Info("Deploy Queue")
+ q, err := deployment.DeployQueue(ctx, "queue", queue)
+ Expect(err).To(Succeed(), "Unable to deploy Queue")
+
+ // Deploy ObjectStorage
+ testcaseEnvInst.Log.Info("Deploy ObjectStorage")
+ objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage)
+ Expect(err).To(Succeed(), "Unable to deploy ObjectStorage")
+
+ // Deploy Ingestor Cluster
+ testcaseEnvInst.Log.Info("Deploy Ingestor Cluster")
+ _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName)
+ Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster")
+
+ // Deploy Cluster Manager
+ testcaseEnvInst.Log.Info("Deploy Cluster Manager")
+ _, err = deployment.DeployClusterManagerWithGivenSpec(ctx, deployment.GetName(), cmSpec)
+ Expect(err).To(Succeed(), "Unable to deploy Cluster Manager")
+
+ // Deploy Indexer Cluster
+ testcaseEnvInst.Log.Info("Deploy Indexer Cluster")
+ _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName)
+ Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster")
+
+ // Ensure that Ingestor Cluster is in Ready phase
+ testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase")
+ testenv.IngestorReady(ctx, deployment, testcaseEnvInst)
+
+ // Ensure that Cluster Manager is in Ready phase
+ testcaseEnvInst.Log.Info("Ensure that Cluster Manager is in Ready phase")
+ testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst)
+
+ // Ensure that Indexer Cluster is in Ready phase
+ testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase")
+ testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst)
+
+ // Delete the Indexer Cluster
+ idxc := &enterpriseApi.IndexerCluster{}
+ err = deployment.GetInstance(ctx, deployment.GetName()+"-idxc", idxc)
+ Expect(err).To(Succeed(), "Unable to get Indexer Cluster instance", "Indexer Cluster Name", idxc)
+ err = deployment.DeleteCR(ctx, idxc)
+ Expect(err).To(Succeed(), "Unable to delete Indexer Cluster instance", "Indexer Cluster Name", idxc)
+
+ // Delete the Ingestor Cluster
+ ingest := &enterpriseApi.IngestorCluster{}
+ err = deployment.GetInstance(ctx, deployment.GetName()+"-ingest", ingest)
+ Expect(err).To(Succeed(), "Unable to get Ingestor Cluster instance", "Ingestor Cluster Name", ingest)
+ err = deployment.DeleteCR(ctx, ingest)
+ Expect(err).To(Succeed(), "Unable to delete Ingestor Cluster instance", "Ingestor Cluster Name", ingest)
+
+ // Delete the Queue
+ q = &enterpriseApi.Queue{}
+ err = deployment.GetInstance(ctx, "queue", q)
+ Expect(err).To(Succeed(), "Unable to get Queue instance", "Queue Name", q)
+ err = deployment.DeleteCR(ctx, q)
+ Expect(err).To(Succeed(), "Unable to delete Queue", "Queue Name", q)
+
+ // Delete the ObjectStorage
+ objStorage = &enterpriseApi.ObjectStorage{}
+ err = deployment.GetInstance(ctx, "os", objStorage)
+ Expect(err).To(Succeed(), "Unable to get ObjectStorage instance", "ObjectStorage Name", objStorage)
+ err = deployment.DeleteCR(ctx, objStorage)
+ Expect(err).To(Succeed(), "Unable to delete ObjectStorage", "ObjectStorage Name", objStorage)
+ })
+ })
+
+ Context("Ingestor and Indexer deployment", func() {
+ It("indingsep, smoke, indingsep: Splunk Operator can deploy Ingestors and Indexers with additional configurations", func() {
+ // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+
+ // Create Service Account
+ // testcaseEnvInst.Log.Info("Create Service Account")
+ // testcaseEnvInst.CreateServiceAccount(serviceAccountName)
+
+ // Secret reference
+ volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())}
+ queue.SQS.VolList = volumeSpec
+
+ // Deploy Queue
+ testcaseEnvInst.Log.Info("Deploy Queue")
+ q, err := deployment.DeployQueue(ctx, "queue", queue)
+ Expect(err).To(Succeed(), "Unable to deploy Queue")
+
+ // Deploy ObjectStorage
+ testcaseEnvInst.Log.Info("Deploy ObjectStorage")
+ objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage)
+ Expect(err).To(Succeed(), "Unable to deploy ObjectStorage")
+
+ // Deploy Ingestor Cluster with additional configurations (similar to standalone app framework test)
+ appSourceName := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3)
+ appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, s3TestDir, 60)
+ appFrameworkSpec.MaxConcurrentAppDownloads = uint64(5)
+ ic := &enterpriseApi.IngestorCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: deployment.GetName() + "-ingest",
+ Namespace: testcaseEnvInst.GetName(),
+ Finalizers: []string{"enterprise.splunk.com/delete-pvc"},
+ },
+ Spec: enterpriseApi.IngestorClusterSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ // ServiceAccount: serviceAccountName,
+ LivenessInitialDelaySeconds: 600,
+ ReadinessInitialDelaySeconds: 50,
+ StartupProbe: &enterpriseApi.Probe{
+ InitialDelaySeconds: 40,
+ TimeoutSeconds: 30,
+ PeriodSeconds: 30,
+ FailureThreshold: 12,
+ },
+ LivenessProbe: &enterpriseApi.Probe{
+ InitialDelaySeconds: 400,
+ TimeoutSeconds: 30,
+ PeriodSeconds: 30,
+ FailureThreshold: 12,
+ },
+ ReadinessProbe: &enterpriseApi.Probe{
+ InitialDelaySeconds: 20,
+ TimeoutSeconds: 30,
+ PeriodSeconds: 30,
+ FailureThreshold: 12,
+ },
+ Spec: enterpriseApi.Spec{
+ ImagePullPolicy: "Always",
+ Image: testcaseEnvInst.GetSplunkImage(),
+ },
+ },
+ QueueRef: v1.ObjectReference{Name: q.Name},
+ ObjectStorageRef: v1.ObjectReference{Name: objStorage.Name},
+ Replicas: 3,
+ AppFrameworkConfig: appFrameworkSpec,
+ },
+ }
+
+ testcaseEnvInst.Log.Info("Deploy Ingestor Cluster with additional configurations")
+ _, err = deployment.DeployIngestorClusterWithAdditionalConfiguration(ctx, ic)
+ Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster")
+
+ // Ensure that Ingestor Cluster is in Ready phase
+ testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase")
+ testenv.IngestorReady(ctx, deployment, testcaseEnvInst)
+
+ // Upload apps to S3
+ testcaseEnvInst.Log.Info("Upload apps to S3")
+ appFileList := testenv.GetAppFileList(appListV1)
+ _, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDir, appFileList, downloadDirV1)
+ Expect(err).To(Succeed(), "Unable to upload V1 apps to S3 test directory for IngestorCluster")
+
+ // Verify Ingestor Cluster Pods have apps installed
+ testcaseEnvInst.Log.Info("Verify Ingestor Cluster Pods have apps installed")
+ ingestorPod := []string{fmt.Sprintf(testenv.IngestorPod, deployment.GetName()+"-ingest", 0)}
+ ingestorAppSourceInfo := testenv.AppSourceInfo{
+ CrKind: ic.Kind,
+ CrName: ic.Name,
+ CrAppSourceName: appSourceName,
+ CrPod: ingestorPod,
+ CrAppVersion: "V1",
+ CrAppScope: enterpriseApi.ScopeLocal,
+ CrAppList: testenv.BasicApps,
+ CrAppFileList: testenv.GetAppFileList(testenv.BasicApps),
+ CrReplicas: 3,
+ }
+ allAppSourceInfo := []testenv.AppSourceInfo{ingestorAppSourceInfo}
+ splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName())
+ testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "")
+
+ // Verify probe configuration
+ testcaseEnvInst.Log.Info("Get config map for probes")
+ ConfigMapName := enterprise.GetProbeConfigMapName(testcaseEnvInst.GetName())
+ _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName)
+ Expect(err).To(Succeed(), "Unable to get config map for probes", "ConfigMap", ConfigMapName)
+ testcaseEnvInst.Log.Info("Verify probe configurations on Ingestor pods")
+ scriptsNames := []string{enterprise.GetLivenessScriptName(), enterprise.GetReadinessScriptName(), enterprise.GetStartupScriptName()}
+ allPods := testenv.DumpGetPods(testcaseEnvInst.GetName())
+ testenv.VerifyFilesInDirectoryOnPod(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPods, scriptsNames, enterprise.GetProbeMountDirectory(), false, true)
+ })
+ })
+
+ Context("Ingestor and Indexer deployment", func() {
+ It("indingsep, integration, indingsep: Splunk Operator can deploy Ingestors and Indexers with correct setup", func() {
+ // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+
+ // Create Service Account
+ // testcaseEnvInst.Log.Info("Create Service Account")
+ // testcaseEnvInst.CreateServiceAccount(serviceAccountName)
+
+ // Secret reference
+ volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())}
+ queue.SQS.VolList = volumeSpec
+
+ // Deploy Queue
+ testcaseEnvInst.Log.Info("Deploy Queue")
+ q, err := deployment.DeployQueue(ctx, "queue", queue)
+ Expect(err).To(Succeed(), "Unable to deploy Queue")
+
+ // Deploy ObjectStorage
+ testcaseEnvInst.Log.Info("Deploy ObjectStorage")
+ objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage)
+ Expect(err).To(Succeed(), "Unable to deploy ObjectStorage")
+
+ // Deploy Ingestor Cluster
+ testcaseEnvInst.Log.Info("Deploy Ingestor Cluster")
+ _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName)
+ Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster")
+
+ // Deploy Cluster Manager
+ testcaseEnvInst.Log.Info("Deploy Cluster Manager")
+ _, err = deployment.DeployClusterManagerWithGivenSpec(ctx, deployment.GetName(), cmSpec)
+ Expect(err).To(Succeed(), "Unable to deploy Cluster Manager")
+
+ // Deploy Indexer Cluster
+ testcaseEnvInst.Log.Info("Deploy Indexer Cluster")
+ _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName)
+ Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster")
+
+ // Ensure that Ingestor Cluster is in Ready phase
+ testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase")
+ testenv.IngestorReady(ctx, deployment, testcaseEnvInst)
+
+ // Ensure that Cluster Manager is in Ready phase
+ testcaseEnvInst.Log.Info("Ensure that Cluster Manager is in Ready phase")
+ testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst)
+
+ // Ensure that Indexer Cluster is in Ready phase
+ testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase")
+ testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst)
+
+ // Get instance of current Ingestor Cluster CR with latest config
+ testcaseEnvInst.Log.Info("Get instance of current Ingestor Cluster CR with latest config")
+ ingest := &enterpriseApi.IngestorCluster{}
+ err = deployment.GetInstance(ctx, deployment.GetName()+"-ingest", ingest)
+ Expect(err).To(Succeed(), "Failed to get instance of Ingestor Cluster")
+
+ // Verify Ingestor Cluster Status
+ testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status")
+ Expect(ingest.Status.CredentialSecretVersion).To(Not(Equal("")), "Ingestor queue status credential access secret version is empty")
+ Expect(ingest.Status.CredentialSecretVersion).To(Not(Equal("0")), "Ingestor queue status credential access secret version is 0")
+
+ // Get instance of current Indexer Cluster CR with latest config
+ testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config")
+ index := &enterpriseApi.IndexerCluster{}
+ err = deployment.GetInstance(ctx, deployment.GetName()+"-idxc", index)
+ Expect(err).To(Succeed(), "Failed to get instance of Indexer Cluster")
+
+ // Verify Indexer Cluster Status
+ testcaseEnvInst.Log.Info("Verify Indexer Cluster Status")
+ Expect(index.Status.CredentialSecretVersion).To(Not(Equal("")), "Indexer queue status credential access secret version is empty")
+ Expect(index.Status.CredentialSecretVersion).To(Not(Equal("0")), "Indexer queue status credential access secret version is 0")
+
+ // Verify conf files
+ testcaseEnvInst.Log.Info("Verify conf files")
+ pods := testenv.DumpGetPods(deployment.GetName())
+ for _, pod := range pods {
+ defaultsConf := ""
+
+ if strings.Contains(pod, "ingest") || strings.Contains(pod, "idxc") {
+ // Verify outputs.conf
+ testcaseEnvInst.Log.Info("Verify outputs.conf")
+ outputsPath := "opt/splunk/etc/system/local/outputs.conf"
+ outputsConf, err := testenv.GetConfFile(pod, outputsPath, deployment.GetName())
+ Expect(err).To(Succeed(), "Failed to get outputs.conf from pod")
+ testenv.ValidateContent(outputsConf, outputs, true)
+
+ // Verify default-mode.conf
+ testcaseEnvInst.Log.Info("Verify default-mode.conf")
+ defaultsPath := "opt/splunk/etc/system/local/default-mode.conf"
+ defaultsConf, err = testenv.GetConfFile(pod, defaultsPath, deployment.GetName())
+ Expect(err).To(Succeed(), "Failed to get default-mode.conf from pod")
+ testenv.ValidateContent(defaultsConf, defaultsAll, true)
+
+ // Verify AWS env variables
+ testcaseEnvInst.Log.Info("Verify AWS env variables")
+ envVars, err := testenv.GetAWSEnv(pod, deployment.GetName())
+ Expect(err).To(Succeed(), "Failed to get AWS env variables from Ingestor Cluster pod")
+ testenv.ValidateContent(envVars, awsEnvVars, true)
+ }
+
+ if strings.Contains(pod, "ingest") {
+ // Verify default-mode.conf
+ testcaseEnvInst.Log.Info("Verify default-mode.conf")
+ testenv.ValidateContent(defaultsConf, defaultsIngest, true)
+ } else if strings.Contains(pod, "idxc") {
+ // Verify inputs.conf
+ testcaseEnvInst.Log.Info("Verify inputs.conf")
+ inputsPath := "opt/splunk/etc/system/local/inputs.conf"
+ inputsConf, err := testenv.GetConfFile(pod, inputsPath, deployment.GetName())
+ Expect(err).To(Succeed(), "Failed to get inputs.conf from Indexer Cluster pod")
+ testenv.ValidateContent(inputsConf, inputs, true)
+ }
+ }
+ })
+ })
+})
diff --git a/test/testenv/appframework_utils.go b/test/testenv/appframework_utils.go
index d1f2f938c..e9879679b 100644
--- a/test/testenv/appframework_utils.go
+++ b/test/testenv/appframework_utils.go
@@ -250,6 +250,28 @@ func GetAppDeploymentInfoStandalone(ctx context.Context, deployment *Deployment,
return appDeploymentInfo, err
}
+// GetAppDeploymentInfoIngestorCluster returns AppDeploymentInfo for given IngestorCluster, appSourceName and appName
+func GetAppDeploymentInfoIngestorCluster(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv, name string, appSourceName string, appName string) (enterpriseApi.AppDeploymentInfo, error) {
+ ingestor := &enterpriseApi.IngestorCluster{}
+ appDeploymentInfo := enterpriseApi.AppDeploymentInfo{}
+ err := deployment.GetInstance(ctx, name, ingestor)
+ if err != nil {
+ testenvInstance.Log.Error(err, "Failed to get CR ", "CR Name", name)
+ return appDeploymentInfo, err
+ }
+ appInfoList := ingestor.Status.AppContext.AppsSrcDeployStatus[appSourceName].AppDeploymentInfoList
+ for _, appInfo := range appInfoList {
+ testenvInstance.Log.Info("Checking Ingestor AppInfo Struct", "App Name", appName, "App Source", appSourceName, "Ingestor Name", name, "AppDeploymentInfo", appInfo)
+ if strings.Contains(appName, appInfo.AppName) {
+ testenvInstance.Log.Info("App Deployment Info found.", "App Name", appName, "App Source", appSourceName, "Ingestor Name", name, "AppDeploymentInfo", appInfo)
+ appDeploymentInfo = appInfo
+ return appDeploymentInfo, nil
+ }
+ }
+ testenvInstance.Log.Info("App Info not found in App Info List", "App Name", appName, "App Source", appSourceName, "Ingestor Name", name, "App Info List", appInfoList)
+ return appDeploymentInfo, err
+}
+
// GetAppDeploymentInfoMonitoringConsole returns AppDeploymentInfo for given Monitoring Console, appSourceName and appName
func GetAppDeploymentInfoMonitoringConsole(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv, name string, appSourceName string, appName string) (enterpriseApi.AppDeploymentInfo, error) {
mc := &enterpriseApi.MonitoringConsole{}
@@ -345,6 +367,8 @@ func GetAppDeploymentInfo(ctx context.Context, deployment *Deployment, testenvIn
switch crKind {
case "Standalone":
appDeploymentInfo, err = GetAppDeploymentInfoStandalone(ctx, deployment, testenvInstance, name, appSourceName, appName)
+ case "IngestorCluster":
+ appDeploymentInfo, err = GetAppDeploymentInfoIngestorCluster(ctx, deployment, testenvInstance, name, appSourceName, appName)
case "MonitoringConsole":
appDeploymentInfo, err = GetAppDeploymentInfoMonitoringConsole(ctx, deployment, testenvInstance, name, appSourceName, appName)
case "SearchHeadCluster":
diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go
index 85e753a84..e639a9513 100644
--- a/test/testenv/deployment.go
+++ b/test/testenv/deployment.go
@@ -431,9 +431,9 @@ func (d *Deployment) DeployClusterMasterWithSmartStoreIndexes(ctx context.Contex
}
// DeployIndexerCluster deploys the indexer cluster
-func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string) (*enterpriseApi.IndexerCluster, error) {
+func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, queue, os corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) {
d.testenv.Log.Info("Deploying indexer cluster", "name", name, "CM", clusterManagerRef)
- indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage)
+ indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, queue, os, serviceAccountName)
pdata, _ := json.Marshal(indexer)
d.testenv.Log.Info("indexer cluster spec", "cr", string(pdata))
deployed, err := d.deployCR(ctx, name, indexer)
@@ -444,6 +444,69 @@ func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseMana
return deployed.(*enterpriseApi.IndexerCluster), err
}
+// DeployIngestorCluster deploys the ingestor cluster
+func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, queue, os corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) {
+ d.testenv.Log.Info("Deploying ingestor cluster", "name", name)
+
+ ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, queue, os, serviceAccountName)
+ pdata, _ := json.Marshal(ingestor)
+
+ d.testenv.Log.Info("ingestor cluster spec", "cr", string(pdata))
+ deployed, err := d.deployCR(ctx, name, ingestor)
+ if err != nil {
+ return nil, err
+ }
+
+ return deployed.(*enterpriseApi.IngestorCluster), err
+}
+
+// DeployQueue deploys the queue
+func (d *Deployment) DeployQueue(ctx context.Context, name string, queue enterpriseApi.QueueSpec) (*enterpriseApi.Queue, error) {
+ d.testenv.Log.Info("Deploying queue", "name", name)
+
+ queueCfg := newQueue(name, d.testenv.namespace, queue)
+ pdata, _ := json.Marshal(queueCfg)
+
+ d.testenv.Log.Info("queue spec", "cr", string(pdata))
+ deployed, err := d.deployCR(ctx, name, queueCfg)
+ if err != nil {
+ return nil, err
+ }
+
+ return deployed.(*enterpriseApi.Queue), err
+}
+
+// DeployObjectStorage deploys the object storage
+func (d *Deployment) DeployObjectStorage(ctx context.Context, name string, objStorage enterpriseApi.ObjectStorageSpec) (*enterpriseApi.ObjectStorage, error) {
+ d.testenv.Log.Info("Deploying object storage", "name", name)
+
+ objStorageCfg := newObjectStorage(name, d.testenv.namespace, objStorage)
+ pdata, _ := json.Marshal(objStorageCfg)
+
+ d.testenv.Log.Info("object storage spec", "cr", string(pdata))
+ deployed, err := d.deployCR(ctx, name, objStorageCfg)
+ if err != nil {
+ return nil, err
+ }
+
+ return deployed.(*enterpriseApi.ObjectStorage), err
+}
+
+// DeployIngestorClusterWithAdditionalConfiguration deploys the ingestor cluster with additional configuration
+func (d *Deployment) DeployIngestorClusterWithAdditionalConfiguration(ctx context.Context, ic *enterpriseApi.IngestorCluster) (*enterpriseApi.IngestorCluster, error) {
+ d.testenv.Log.Info("Deploying ingestor cluster with additional configuration", "name", ic.Name)
+
+ pdata, _ := json.Marshal(ic)
+
+ d.testenv.Log.Info("ingestor cluster spec", "cr", string(pdata))
+ deployed, err := d.deployCR(ctx, ic.Name, ic)
+ if err != nil {
+ return nil, err
+ }
+
+ return deployed.(*enterpriseApi.IngestorCluster), err
+}
+
// DeploySearchHeadCluster deploys a search head cluster
func (d *Deployment) DeploySearchHeadCluster(ctx context.Context, name, ClusterManagerRef, LicenseManagerName string, ansibleConfig string, mcRef string) (*enterpriseApi.SearchHeadCluster, error) {
d.testenv.Log.Info("Deploying search head cluster", "name", name)
@@ -576,6 +639,33 @@ func (d *Deployment) UpdateCR(ctx context.Context, cr client.Object) error {
ucr := cr.(*enterpriseApi.IndexerCluster)
current.Spec = ucr.Spec
cobject = current
+ case "IngestorCluster":
+ current := &enterpriseApi.IngestorCluster{}
+ err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current)
+ if err != nil {
+ return err
+ }
+ ucr := cr.(*enterpriseApi.IngestorCluster)
+ current.Spec = ucr.Spec
+ cobject = current
+ case "Queue":
+ current := &enterpriseApi.Queue{}
+ err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current)
+ if err != nil {
+ return err
+ }
+ ucr := cr.(*enterpriseApi.Queue)
+ current.Spec = ucr.Spec
+ cobject = current
+ case "ObjectStorage":
+ current := &enterpriseApi.ObjectStorage{}
+ err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current)
+ if err != nil {
+ return err
+ }
+ ucr := cr.(*enterpriseApi.ObjectStorage)
+ current.Spec = ucr.Spec
+ cobject = current
case "ClusterMaster":
current := &enterpriseApiV3.ClusterMaster{}
err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current)
@@ -675,7 +765,7 @@ func (d *Deployment) DeploySingleSiteCluster(ctx context.Context, name string, i
}
// Deploy the indexer cluster
- _, err := d.DeployIndexerCluster(ctx, name+"-idxc", LicenseManager, indexerReplicas, name, "")
+ _, err := d.DeployIndexerCluster(ctx, name+"-idxc", LicenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "")
if err != nil {
return err
}
@@ -733,7 +823,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHead(ctx context.Cont
multisite_master: splunk-%s-%s-service
site: %s
`, name, "cluster-master", siteName)
- _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseMaster, indexerReplicas, name, siteDefaults)
+ _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "")
if err != nil {
return err
}
@@ -805,7 +895,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHead(ctx context.Context, n
multisite_master: splunk-%s-%s-service
site: %s
`, name, "cluster-manager", siteName)
- _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults)
+ _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "")
if err != nil {
return err
}
@@ -866,7 +956,7 @@ func (d *Deployment) DeployMultisiteCluster(ctx context.Context, name string, in
multisite_master: splunk-%s-%s-service
site: %s
`, name, "cluster-manager", siteName)
- _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults)
+ _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "")
if err != nil {
return err
}
@@ -1002,7 +1092,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndIndexes(ctx context.
multisite_master: splunk-%s-%s-service
site: %s
`, name, "cluster-manager", siteName)
- _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults)
+ _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "")
if err != nil {
return err
}
@@ -1057,7 +1147,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHeadAndIndexes(ctx co
multisite_master: splunk-%s-%s-service
site: %s
`, name, "cluster-master", siteName)
- _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults)
+ _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "")
if err != nil {
return err
}
@@ -1162,7 +1252,7 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx contex
}
// Deploy the indexer cluster
- idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "")
+ idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "")
if err != nil {
return cm, idxc, sh, err
}
@@ -1240,7 +1330,7 @@ func (d *Deployment) DeploySingleSiteClusterMasterWithGivenAppFrameworkSpec(ctx
}
// Deploy the indexer cluster
- idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "")
+ idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "")
if err != nil {
return cm, idxc, sh, err
}
@@ -1340,7 +1430,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx con
multisite_master: splunk-%s-%s-service
site: %s
`, name, "cluster-manager", siteName)
- idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults)
+ idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "")
if err != nil {
return cm, idxc, sh, err
}
@@ -1444,7 +1534,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(c
multisite_master: splunk-%s-%s-service
site: %s
`, name, "cluster-master", siteName)
- idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults)
+ idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "")
if err != nil {
return cm, idxc, sh, err
}
@@ -1525,7 +1615,7 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenMonitoringConsole(ctx conte
}
// Deploy the indexer cluster
- _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "")
+ _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "")
if err != nil {
return err
}
@@ -1597,7 +1687,7 @@ func (d *Deployment) DeploySingleSiteClusterMasterWithGivenMonitoringConsole(ctx
}
// Deploy the indexer cluster
- _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "")
+ _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "")
if err != nil {
return err
}
@@ -1691,7 +1781,7 @@ func (d *Deployment) DeployMultisiteClusterWithMonitoringConsole(ctx context.Con
multisite_master: splunk-%s-%s-service
site: %s
`, name, "cluster-manager", siteName)
- _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults)
+ _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "")
if err != nil {
return err
}
@@ -1791,7 +1881,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithMonitoringConsole(ctx conte
multisite_master: splunk-%s-%s-service
site: %s
`, name, "cluster-master", siteName)
- _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults)
+ _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "")
if err != nil {
return err
}
@@ -1830,3 +1920,13 @@ func (d *Deployment) DeployMultisiteClusterMasterWithMonitoringConsole(ctx conte
}
return nil
}
+
+// GetConfigMap retrieves a ConfigMap by name in the deployment's namespace.
+func (d *Deployment) GetConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error) {
+ cm := &corev1.ConfigMap{}
+ err := d.testenv.GetKubeClient().Get(ctx, client.ObjectKey{Name: name, Namespace: d.testenv.namespace}, cm)
+ if err != nil {
+ return nil, err
+ }
+ return cm, nil
+}
diff --git a/test/testenv/gcputils.go b/test/testenv/gcputils.go
index e999f4116..be54c586b 100644
--- a/test/testenv/gcputils.go
+++ b/test/testenv/gcputils.go
@@ -5,6 +5,7 @@ import (
"compress/gzip"
"context"
"encoding/base64"
+
//"encoding/json"
"errors"
"fmt"
@@ -16,6 +17,7 @@ import (
"cloud.google.com/go/storage"
"github.com/google/uuid"
+
//"golang.org/x/oauth2/google"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
diff --git a/test/testenv/remote_index_utils.go b/test/testenv/remote_index_utils.go
index 0eb2b485c..f696a4a17 100644
--- a/test/testenv/remote_index_utils.go
+++ b/test/testenv/remote_index_utils.go
@@ -86,6 +86,14 @@ func RollHotToWarm(ctx context.Context, deployment *Deployment, podName string,
return true
}
+// GenerateQueueVolumeSpec return VolumeSpec struct with given values
+func GenerateQueueVolumeSpec(name, secretRef string) enterpriseApi.VolumeSpec {
+ return enterpriseApi.VolumeSpec{
+ Name: name,
+ SecretRef: secretRef,
+ }
+}
+
// GenerateIndexVolumeSpec return VolumeSpec struct with given values
func GenerateIndexVolumeSpec(volumeName string, endpoint string, secretRef string, provider string, storageType string, region string) enterpriseApi.VolumeSpec {
return enterpriseApi.VolumeSpec{
diff --git a/test/testenv/testcaseenv.go b/test/testenv/testcaseenv.go
index 3987226ab..cb3c8a107 100644
--- a/test/testenv/testcaseenv.go
+++ b/test/testenv/testcaseenv.go
@@ -35,24 +35,25 @@ import (
// TestCaseEnv represents a namespaced-isolated k8s cluster environment (aka virtual k8s cluster) to run test cases against
type TestCaseEnv struct {
- kubeClient client.Client
- name string
- namespace string
- serviceAccountName string
- roleName string
- roleBindingName string
- operatorName string
- operatorImage string
- splunkImage string
- initialized bool
- SkipTeardown bool
- licenseFilePath string
- licenseCMName string
- s3IndexSecret string
- Log logr.Logger
- cleanupFuncs []cleanupFunc
- debug string
- clusterWideOperator string
+ kubeClient client.Client
+ name string
+ namespace string
+ serviceAccountName string
+ roleName string
+ roleBindingName string
+ operatorName string
+ operatorImage string
+ splunkImage string
+ initialized bool
+ SkipTeardown bool
+ licenseFilePath string
+ licenseCMName string
+ s3IndexSecret string
+ indexIngestSepSecret string
+ Log logr.Logger
+ cleanupFuncs []cleanupFunc
+ debug string
+ clusterWideOperator string
}
// GetKubeClient returns the kube client to talk to kube-apiserver
@@ -79,21 +80,22 @@ func NewTestCaseEnv(kubeClient client.Client, name string, operatorImage string,
}
testenv := &TestCaseEnv{
- kubeClient: kubeClient,
- name: name,
- namespace: name,
- serviceAccountName: name,
- roleName: name,
- roleBindingName: name,
- operatorName: "splunk-op-" + name,
- operatorImage: operatorImage,
- splunkImage: splunkImage,
- SkipTeardown: specifiedSkipTeardown,
- licenseCMName: name,
- licenseFilePath: licenseFilePath,
- s3IndexSecret: "splunk-s3-index-" + name,
- debug: os.Getenv("DEBUG"),
- clusterWideOperator: installOperatorClusterWide,
+ kubeClient: kubeClient,
+ name: name,
+ namespace: name,
+ serviceAccountName: name,
+ roleName: name,
+ roleBindingName: name,
+ operatorName: "splunk-op-" + name,
+ operatorImage: operatorImage,
+ splunkImage: splunkImage,
+ SkipTeardown: specifiedSkipTeardown,
+ licenseCMName: name,
+ licenseFilePath: licenseFilePath,
+ s3IndexSecret: "splunk-s3-index-" + name,
+ indexIngestSepSecret: "splunk-index-ingest-sep-" + name,
+ debug: os.Getenv("DEBUG"),
+ clusterWideOperator: installOperatorClusterWide,
}
testenv.Log = logf.Log.WithValues("testcaseenv", testenv.name)
@@ -156,6 +158,7 @@ func (testenv *TestCaseEnv) setup() error {
switch ClusterProvider {
case "eks":
testenv.createIndexSecret()
+ testenv.createIndexIngestSepSecret()
case "azure":
testenv.createIndexSecretAzure()
case "gcp":
@@ -598,11 +601,41 @@ func (testenv *TestCaseEnv) createIndexSecretAzure() error {
return nil
}
+// createIndexIngestSepSecret creates the index/ingestion separation secret object
+func (testenv *TestCaseEnv) createIndexIngestSepSecret() error {
+ secretName := testenv.indexIngestSepSecret
+ ns := testenv.namespace
+
+ data := map[string][]byte{"s3_access_key": []byte(os.Getenv("AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID")),
+ "s3_secret_key": []byte(os.Getenv("AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY"))}
+ secret := newSecretSpec(ns, secretName, data)
+
+ if err := testenv.GetKubeClient().Create(context.TODO(), secret); err != nil {
+ testenv.Log.Error(err, "Unable to create index and ingestion sep secret object")
+ return err
+ }
+
+ testenv.pushCleanupFunc(func() error {
+ err := testenv.GetKubeClient().Delete(context.TODO(), secret)
+ if err != nil {
+ testenv.Log.Error(err, "Unable to delete index and ingestion sep secret object")
+ return err
+ }
+ return nil
+ })
+ return nil
+}
+
// GetIndexSecretName return index secret object name
func (testenv *TestCaseEnv) GetIndexSecretName() string {
return testenv.s3IndexSecret
}
+// GetIndexIngestSepSecretName return index and ingestion separation secret object name
+func (testenv *TestCaseEnv) GetIndexIngestSepSecretName() string {
+ return testenv.indexIngestSepSecret
+}
+
// GetLMConfigMap Return name of license config map
func (testenv *TestCaseEnv) GetLMConfigMap() string {
return testenv.licenseCMName
diff --git a/test/testenv/testenv.go b/test/testenv/testenv.go
index 7e4579ee2..06fe304d4 100644
--- a/test/testenv/testenv.go
+++ b/test/testenv/testenv.go
@@ -20,9 +20,10 @@ import (
"fmt"
"net"
"os"
- "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"time"
+ "sigs.k8s.io/controller-runtime/pkg/metrics/server"
+
enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
@@ -77,6 +78,9 @@ const (
// LicenseMasterPod Template String for standalone pod
LicenseMasterPod = "splunk-%s-" + splcommon.LicenseManager + "-%d"
+ // IngestorPod Template String for ingestor pod
+ IngestorPod = "splunk-%s-ingestor-%d"
+
// IndexerPod Template String for indexer pod
IndexerPod = "splunk-%s-idxc-indexer-%d"
@@ -156,24 +160,25 @@ type cleanupFunc func() error
// TestEnv represents a namespaced-isolated k8s cluster environment (aka virtual k8s cluster) to run tests against
type TestEnv struct {
- kubeAPIServer string
- name string
- namespace string
- serviceAccountName string
- roleName string
- roleBindingName string
- operatorName string
- operatorImage string
- splunkImage string
- initialized bool
- SkipTeardown bool
- licenseFilePath string
- licenseCMName string
- s3IndexSecret string
- kubeClient client.Client
- Log logr.Logger
- cleanupFuncs []cleanupFunc
- debug string
+ kubeAPIServer string
+ name string
+ namespace string
+ serviceAccountName string
+ roleName string
+ roleBindingName string
+ operatorName string
+ operatorImage string
+ splunkImage string
+ initialized bool
+ SkipTeardown bool
+ licenseFilePath string
+ licenseCMName string
+ s3IndexSecret string
+ indexIngestSepSecret string
+ kubeClient client.Client
+ Log logr.Logger
+ cleanupFuncs []cleanupFunc
+ debug string
}
func init() {
@@ -227,19 +232,20 @@ func NewTestEnv(name, commitHash, operatorImage, splunkImage, licenseFilePath st
}
testenv := &TestEnv{
- name: envName,
- namespace: envName,
- serviceAccountName: envName,
- roleName: envName,
- roleBindingName: envName,
- operatorName: "splunk-op-" + envName,
- operatorImage: operatorImage,
- splunkImage: splunkImage,
- SkipTeardown: specifiedSkipTeardown,
- licenseCMName: envName,
- licenseFilePath: licenseFilePath,
- s3IndexSecret: "splunk-s3-index-" + envName,
- debug: os.Getenv("DEBUG"),
+ name: envName,
+ namespace: envName,
+ serviceAccountName: envName,
+ roleName: envName,
+ roleBindingName: envName,
+ operatorName: "splunk-op-" + envName,
+ operatorImage: operatorImage,
+ splunkImage: splunkImage,
+ SkipTeardown: specifiedSkipTeardown,
+ licenseCMName: envName,
+ licenseFilePath: licenseFilePath,
+ s3IndexSecret: "splunk-s3-index-" + envName,
+ indexIngestSepSecret: "splunk-index-ingest-sep-" + envName,
+ debug: os.Getenv("DEBUG"),
}
testenv.Log = logf.Log.WithValues("testenv", testenv.name)
diff --git a/test/testenv/util.go b/test/testenv/util.go
index fce1b58b1..366ea3668 100644
--- a/test/testenv/util.go
+++ b/test/testenv/util.go
@@ -30,6 +30,8 @@ import (
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ . "github.com/onsi/gomega"
+
"github.com/onsi/ginkgo/v2"
enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
@@ -357,7 +359,7 @@ func newClusterMasterWithGivenIndexes(name, ns, licenseManagerName, ansibleConfi
}
// newIndexerCluster creates and initialize the CR for IndexerCluster Kind
-func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string) *enterpriseApi.IndexerCluster {
+func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, queue, os corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster {
licenseMasterRef, licenseManagerRef := swapLicenseManager(name, licenseManagerName)
clusterMasterRef, clusterManagerRef := swapClusterManager(name, clusterManagerRef)
@@ -374,7 +376,8 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste
Spec: enterpriseApi.IndexerClusterSpec{
CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
- Volumes: []corev1.Volume{},
+ ServiceAccount: serviceAccountName,
+ Volumes: []corev1.Volume{},
Spec: enterpriseApi.Spec{
ImagePullPolicy: "Always",
Image: splunkImage,
@@ -393,13 +396,71 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste
},
Defaults: ansibleConfig,
},
- Replicas: int32(replicas),
+ Replicas: int32(replicas),
+ QueueRef: queue,
+ ObjectStorageRef: os,
},
}
return &new
}
+// newIngestorCluster creates and initialize the CR for IngestorCluster Kind
+func newIngestorCluster(name, ns string, replicas int, splunkImage string, queue, os corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster {
+ return &enterpriseApi.IngestorCluster{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "IngestorCluster",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: ns,
+ Finalizers: []string{"enterprise.splunk.com/delete-pvc"},
+ },
+
+ Spec: enterpriseApi.IngestorClusterSpec{
+ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+ ServiceAccount: serviceAccountName,
+ Volumes: []corev1.Volume{},
+ Spec: enterpriseApi.Spec{
+ ImagePullPolicy: "Always",
+ Image: splunkImage,
+ },
+ },
+ Replicas: int32(replicas),
+ QueueRef: queue,
+ ObjectStorageRef: os,
+ },
+ }
+}
+
+// newQueue creates and initializes the CR for Queue Kind
+func newQueue(name, ns string, queue enterpriseApi.QueueSpec) *enterpriseApi.Queue {
+ return &enterpriseApi.Queue{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Queue",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: ns,
+ },
+ Spec: queue,
+ }
+}
+
+// newObjectStorage creates and initializes the CR for ObjectStorage Kind
+func newObjectStorage(name, ns string, objStorage enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage {
+ return &enterpriseApi.ObjectStorage{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ObjectStorage",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: ns,
+ },
+ Spec: objStorage,
+ }
+}
+
func newSearchHeadCluster(name, ns, clusterManagerRef, licenseManagerName, ansibleConfig, splunkImage string) *enterpriseApi.SearchHeadCluster {
licenseMasterRef, licenseManagerRef := swapLicenseManager(name, licenseManagerName)
@@ -1188,3 +1249,47 @@ func DeleteConfigMap(ns string, ConfigMapName string) error {
}
return nil
}
+
+// GetConfFile gets config file from pod
+func GetConfFile(podName, filePath, ns string) (string, error) {
+ var config string
+ var err error
+
+ output, err := exec.Command("kubectl", "exec", "-n", ns, podName, "--", "cat", filePath).Output()
+ if err != nil {
+ cmd := fmt.Sprintf("kubectl exec -n %s %s -- cat %s", ns, podName, filePath)
+ logf.Log.Error(err, "Failed to execute command", "command", cmd)
+ return config, err
+ }
+
+ return string(output), err
+}
+
+// GetAWSEnv gets AWS environment variables from pod
+func GetAWSEnv(podName, ns string) (string, error) {
+ var config string
+ var err error
+
+ output, err := exec.Command("kubectl", "exec", "-n", ns, podName, "--", "/bin/sh", "-c", "env | grep -i aws").Output()
+ if err != nil {
+ cmd := fmt.Sprintf("kubectl exec -n %s %s -- env | grep -i aws", ns, podName)
+ logf.Log.Error(err, "Failed to execute command", "command", cmd)
+ return config, err
+ }
+
+ return string(output), err
+}
+
+func ValidateContent(confFileContent string, listOfStringsForValidation []string, shouldContain bool) {
+ for _, str := range listOfStringsForValidation {
+ if shouldContain {
+ if !strings.Contains(confFileContent, str) {
+ Expect(confFileContent).To(ContainSubstring(str), "Failed to find string "+str+" in conf file")
+ }
+ } else {
+ if strings.Contains(confFileContent, str) {
+ Expect(confFileContent).ToNot(ContainSubstring(str), "Found string "+str+" in conf file, but it should not be there")
+ }
+ }
+ }
+}
diff --git a/test/testenv/verificationutils.go b/test/testenv/verificationutils.go
index e5c734405..cb611254d 100644
--- a/test/testenv/verificationutils.go
+++ b/test/testenv/verificationutils.go
@@ -20,7 +20,9 @@ import (
"context"
"encoding/json"
"fmt"
+ "math/rand"
"os/exec"
+ "sigs.k8s.io/controller-runtime/pkg/client"
"strings"
"time"
@@ -185,6 +187,34 @@ func SingleSiteIndexersReady(ctx context.Context, deployment *Deployment, testen
}, ConsistentDuration, ConsistentPollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady))
}
+// IngestorReady verify ingestor cluster goes to ready state
+func IngestorReady(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv) {
+ ingest := &enterpriseApi.IngestorCluster{}
+ instanceName := fmt.Sprintf("%s-ingest", deployment.GetName())
+
+ gomega.Eventually(func() enterpriseApi.Phase {
+ err := deployment.GetInstance(ctx, instanceName, ingest)
+ if err != nil {
+ return enterpriseApi.PhaseError
+ }
+
+ testenvInstance.Log.Info("Waiting for ingestor instance's phase to be ready", "instance", instanceName, "phase", ingest.Status.Phase)
+ DumpGetPods(testenvInstance.GetName())
+
+ return ingest.Status.Phase
+ }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady))
+
+ // In a steady state, we should stay in Ready and not flip-flop around
+ gomega.Consistently(func() enterpriseApi.Phase {
+ _ = deployment.GetInstance(ctx, instanceName, ingest)
+
+ testenvInstance.Log.Info("Check for Consistency ingestor instance's phase to be ready", "instance", instanceName, "phase", ingest.Status.Phase)
+ DumpGetSplunkVersion(ctx, testenvInstance.GetName(), deployment, "-ingest-")
+
+ return ingest.Status.Phase
+ }, ConsistentDuration, ConsistentPollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady))
+}
+
// ClusterManagerReady verify Cluster Manager Instance is in ready status
func ClusterManagerReady(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv) {
// Ensure that the cluster-manager goes to Ready phase
@@ -997,7 +1027,11 @@ func VerifyAppListPhase(ctx context.Context, deployment *Deployment, testenvInst
appDeploymentInfo, err := GetAppDeploymentInfo(ctx, deployment, testenvInstance, name, crKind, appSourceName, appName)
if err != nil {
testenvInstance.Log.Error(err, "Failed to get app deployment info")
- return phase
+ return phase // Continue polling
+ }
+ if appDeploymentInfo.AppName == "" {
+ testenvInstance.Log.Info(fmt.Sprintf("App deployment info not found yet for app %s (CR %s/%s, AppSource %s), continuing to poll", appName, crKind, name, appSourceName))
+ return phase // Continue polling
}
testenvInstance.Log.Info(fmt.Sprintf("App State found for CR %s NAME %s APP NAME %s Expected Phase should not be %s", crKind, name, appName, phase), "Actual Phase", appDeploymentInfo.PhaseInfo.Phase, "App State", appDeploymentInfo)
return appDeploymentInfo.PhaseInfo.Phase
@@ -1010,7 +1044,11 @@ func VerifyAppListPhase(ctx context.Context, deployment *Deployment, testenvInst
appDeploymentInfo, err := GetAppDeploymentInfo(ctx, deployment, testenvInstance, name, crKind, appSourceName, appName)
if err != nil {
testenvInstance.Log.Error(err, "Failed to get app deployment info")
- return enterpriseApi.PhaseDownload
+ return enterpriseApi.PhaseDownload // Continue polling
+ }
+ if appDeploymentInfo.AppName == "" {
+ testenvInstance.Log.Info(fmt.Sprintf("App deployment info not found yet for app %s (CR %s/%s, AppSource %s), continuing to poll", appName, crKind, name, appSourceName))
+ return enterpriseApi.PhaseDownload // Continue polling
}
testenvInstance.Log.Info(fmt.Sprintf("App State found for CR %s NAME %s APP NAME %s Expected Phase %s", crKind, name, appName, phase), "Actual Phase", appDeploymentInfo.PhaseInfo.Phase, "App Phase Status", appDeploymentInfo.PhaseInfo.Status, "App State", appDeploymentInfo)
if appDeploymentInfo.PhaseInfo.Status != enterpriseApi.AppPkgInstallComplete {
@@ -1213,3 +1251,84 @@ func VerifyFilesInDirectoryOnPod(ctx context.Context, deployment *Deployment, te
}, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(true))
}
}
+
+// GetTelemetryLastSubmissionTime reads the operator telemetry ConfigMap
+// (splunk-operator-manager-telemetry in the splunk-operator namespace),
+// parses the JSON document stored under its "status" key, and returns the
+// lastTransmission timestamp from it. It returns "" when the ConfigMap
+// cannot be fetched, the status key is absent or empty, or the JSON fails
+// to parse; each failure path is logged.
+func GetTelemetryLastSubmissionTime(ctx context.Context, deployment *Deployment) string {
+	const (
+		configMapName = "splunk-operator-manager-telemetry"
+		statusKey     = "status"
+	)
+	// Only the lastTransmission field is needed; any other fields in the
+	// status document are ignored by the decoder.
+	type telemetryStatus struct {
+		LastTransmission string `json:"lastTransmission"`
+	}
+
+	cm := &corev1.ConfigMap{}
+	err := deployment.testenv.GetKubeClient().Get(ctx, client.ObjectKey{Name: configMapName, Namespace: "splunk-operator"}, cm)
+	if err != nil {
+		logf.Log.Error(err, "GetTelemetryLastSubmissionTime: failed to retrieve configmap")
+		return ""
+	}
+
+	statusVal, ok := cm.Data[statusKey]
+	if !ok || statusVal == "" {
+		logf.Log.Info("GetTelemetryLastSubmissionTime: failed to retrieve status")
+		return ""
+	}
+	logf.Log.Info("GetTelemetryLastSubmissionTime: retrieved status", "status", statusVal)
+
+	var status telemetryStatus
+	if err := json.Unmarshal([]byte(statusVal), &status); err != nil {
+		logf.Log.Error(err, "GetTelemetryLastSubmissionTime: failed to unmarshal status", "statusVal", statusVal)
+		return ""
+	}
+	return status.LastTransmission
+}
+
+// VerifyTelemetry polls the telemetry ConfigMap until its lastTransmission
+// value is non-empty and differs from prevVal, failing the test via gomega
+// if that never happens within the deployment timeout.
+func VerifyTelemetry(ctx context.Context, deployment *Deployment, prevVal string) {
+	logf.Log.Info("VerifyTelemetry: start")
+	transmissionChanged := func() bool {
+		latest := GetTelemetryLastSubmissionTime(ctx, deployment)
+		if latest == "" || latest == prevVal {
+			// Not submitted yet (or unchanged) — keep polling.
+			return false
+		}
+		logf.Log.Info("VerifyTelemetry: success", "previous", prevVal, "current", latest)
+		return true
+	}
+	gomega.Eventually(transmissionChanged, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(true))
+}
+
+// TriggerTelemetrySubmission forces a telemetry submission cycle by writing a
+// fresh random value under the 'test_submission' key of the telemetry
+// ConfigMap (splunk-operator-manager-telemetry in the splunk-operator
+// namespace). Errors are logged and the function returns without failing the
+// test, matching the best-effort style of the other telemetry helpers.
+func TriggerTelemetrySubmission(ctx context.Context, deployment *Deployment) {
+	const (
+		configMapName = "splunk-operator-manager-telemetry"
+		testKey       = "test_submission"
+	)
+
+	// Generate a random number. The global math/rand source is automatically
+	// seeded since Go 1.20, so the deprecated rand.Seed call is not needed.
+	randomNumber := rand.Intn(1000)
+
+	// Create the JSON value
+	jsonValue, err := json.Marshal(map[string]int{"value": randomNumber})
+	if err != nil {
+		logf.Log.Error(err, "Failed to marshal JSON value")
+		return
+	}
+
+	// Fetch the current ConfigMap so the update is applied on top of it.
+	cm := &corev1.ConfigMap{}
+	err = deployment.testenv.GetKubeClient().Get(ctx, client.ObjectKey{Name: configMapName, Namespace: "splunk-operator"}, cm)
+	if err != nil {
+		logf.Log.Error(err, "Failed to get ConfigMap")
+		return
+	}
+
+	// cm.Data is nil when the ConfigMap has no data section yet; writing to
+	// a nil map panics, so initialize it first.
+	if cm.Data == nil {
+		cm.Data = make(map[string]string)
+	}
+
+	// Update the test_submission key
+	cm.Data[testKey] = string(jsonValue)
+	err = deployment.testenv.GetKubeClient().Update(ctx, cm)
+	if err != nil {
+		logf.Log.Error(err, "Failed to update ConfigMap")
+		return
+	}
+
+	// Log the payload as a string; logging the raw []byte renders as
+	// base64/byte-list in structured loggers.
+	logf.Log.Info("Successfully updated telemetry ConfigMap", "key", testKey, "value", string(jsonValue))
+}
diff --git a/test/trigger-tests.sh b/test/trigger-tests.sh
index dc967546d..b04698e0c 100644
--- a/test/trigger-tests.sh
+++ b/test/trigger-tests.sh
@@ -141,7 +141,9 @@ if [[ -z "${DEBUG}" ]]; then
export DEBUG="${DEBUG_RUN}"
fi
-
+# Always set telemetry test to true before running tests
+echo "Setting telemetry test to true"
+kubectl patch configmap splunk-operator-manager-telemetry -n splunk-operator --type merge -p '{"data":{"status":"{\"test\":\"true\",\"lastTransmission\":\"\"}"}}'
echo "Skipping following test :: ${TEST_TO_SKIP}"