Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 0 additions & 5 deletions api/v4/ingestorcluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,11 +76,6 @@ type IngestorClusterStatus struct {
// Auxillary message describing CR status
Message string `json:"message"`

// Credential secret version to track changes to the secret and trigger rolling restart of indexer cluster peers when the secret is updated
CredentialSecretVersion string `json:"credentialSecretVersion,omitempty"`

// Service account to track changes to the service account and trigger rolling restart of indexer cluster peers when the service account is updated
ServiceAccount string `json:"serviceAccount,omitempty"`
}

// +kubebuilder:object:root=true
Expand Down
10 changes: 0 additions & 10 deletions config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4612,11 +4612,6 @@ spec:
description: App Framework version info for future use
type: integer
type: object
credentialSecretVersion:
description: Credential secret version to track changes to the secret
and trigger rolling restart of indexer cluster peers when the secret
is updated
type: string
message:
description: Auxillary message describing CR status
type: string
Expand Down Expand Up @@ -4647,11 +4642,6 @@ spec:
selector:
description: Selector for pods used by HorizontalPodAutoscaler
type: string
serviceAccount:
description: Service account to track changes to the service account
and trigger rolling restart of indexer cluster peers when the service
account is updated
type: string
telAppInstalled:
description: Telemetry App installation flag
type: boolean
Expand Down
1,254 changes: 322 additions & 932 deletions docs/IndexIngestionSeparation.md

Large diffs are not rendered by default.

Binary file added docs/images/index_ingestion_separation.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
18 changes: 18 additions & 0 deletions pkg/splunk/common/paths.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,4 +35,22 @@ const (

//OperatorMountLocalServerConf
OperatorMountLocalServerConf = "/mnt/splunk-operator/local/server.conf"

//OperatorClusterManagerAppsLocalOutputsConf
OperatorClusterManagerAppsLocalOutputsConf = "/opt/splk/etc/manager-apps/splunk-operator/local/outputs.conf"

//OperatorClusterManagerAppsLocalInputsConf
OperatorClusterManagerAppsLocalInputsConf = "/opt/splk/etc/manager-apps/splunk-operator/local/inputs.conf"

//OperatorClusterManagerAppsLocalDefaultModeConf
OperatorClusterManagerAppsLocalDefaultModeConf = "/opt/splk/etc/manager-apps/splunk-operator/local/default-mode.conf"

//OperatorMountLocalOutputsConf
OperatorMountLocalOutputsConf = "/mnt/splunk-operator/local/outputs.conf"

//OperatorMountLocalInputsConf
OperatorMountLocalInputsConf = "/mnt/splunk-operator/local/inputs.conf"

//OperatorMountLocalDefaultModeConf
OperatorMountLocalDefaultModeConf = "/mnt/splunk-operator/local/default-mode.conf"
)
81 changes: 81 additions & 0 deletions pkg/splunk/enterprise/clustermanager.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"

"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/log"
Expand Down Expand Up @@ -313,12 +314,92 @@ func getClusterManagerStatefulSet(ctx context.Context, client splcommon.Controll
if smartStoreConfigMap != nil {
setupInitContainer(&ss.Spec.Template, cr.Spec.Image, cr.Spec.ImagePullPolicy, commandForCMSmartstore, cr.Spec.CommonSplunkSpec.EtcVolumeStorageConfig.EphemeralStorage)
}

// If a queue config ConfigMap exists for this CM, add a separate init container and volume.
setupCMQueueConfigInitContainer(ctx, client, cr, ss)
// Setup App framework staging volume for apps
setupAppsStagingVolume(ctx, client, cr, &ss.Spec.Template, &cr.Spec.AppFrameworkConfig)

return ss, err
}

// setupCMQueueConfigInitContainer adds a dedicated init container and ConfigMap volume for queue config
// to the ClusterManager StatefulSet if the queue config ConfigMap exists. This is a separate init container
// from the smartstore "init" container — it runs independently and symlinks outputs.conf, inputs.conf,
// and default-mode.conf from the queue config ConfigMap mount into manager-apps/splunk-operator/local/.
func setupCMQueueConfigInitContainer(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager, ss *appsv1.StatefulSet) {
configMapName := GetCMQueueConfigMapName(cr.GetName())
// Only add the init container if the queue config ConfigMap exists.
_, err := splctrl.GetConfigMap(ctx, client, types.NamespacedName{Name: configMapName, Namespace: cr.GetNamespace()})
if err != nil {
// ConfigMap doesn't exist yet — no queue config configured for this CM.
Copy link

Copilot AI Feb 26, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The ClusterManager init container setup function silently returns when the ConfigMap doesn't exist. While this is intentional (the ConfigMap is only created when IndexerCluster has queueRef set), the silent return makes debugging difficult. Consider logging at Info level when the ConfigMap is not found to make it clear that queue config init is being skipped.

This would help operators understand why init containers may or may not be present on ClusterManager pods.

Suggested change
// ConfigMap doesn't exist yet — no queue config configured for this CM.
logger := log.FromContext(ctx)
if k8serrors.IsNotFound(err) {
// ConfigMap doesn't exist yet — no queue config configured for this CM.
logger.Info("Queue config ConfigMap not found; skipping queue config init container",
"configMap", configMapName,
"namespace", cr.GetNamespace(),
"clusterManager", cr.GetName())
} else {
// Unexpected error retrieving ConfigMap — skip init container but log the error for debugging.
logger.Error(err, "Failed to get queue config ConfigMap; skipping queue config init container",
"configMap", configMapName,
"namespace", cr.GetNamespace(),
"clusterManager", cr.GetName())
}

Copilot uses AI. Check for mistakes.
return
}

// Add queue config ConfigMap volume to pod spec.
// defaultMode 420 (0644) must be set explicitly to match what Kubernetes stores —
// omitting it causes MergePodUpdates to see a diff on every reconcile (infinite update loop).
defaultMode := int32(420)
ss.Spec.Template.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{
Name: cmQueueConfigVolName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: configMapName,
},
DefaultMode: &defaultMode,
},
},
})

// Determine etc volume mount name (ephemeral vs PVC)
var etcVolMntName string
if cr.Spec.CommonSplunkSpec.EtcVolumeStorageConfig.EphemeralStorage {
etcVolMntName = fmt.Sprintf(splcommon.SplunkMountNamePrefix, splcommon.EtcVolumeStorage)
} else {
etcVolMntName = fmt.Sprintf(splcommon.PvcNamePrefix, splcommon.EtcVolumeStorage)
}

runAsUser := int64(41812)
runAsNonRoot := true
privileged := false

initContainer := corev1.Container{
Name: "init-cm-queue-config",
Image: ss.Spec.Template.Spec.Containers[0].Image,
ImagePullPolicy: ss.Spec.Template.Spec.Containers[0].ImagePullPolicy,
Command: []string{"bash", "-c", commandForCMQueueConfig},
VolumeMounts: []corev1.VolumeMount{
{Name: etcVolMntName, MountPath: "/opt/splk/etc"},
{Name: cmQueueConfigVolName, MountPath: cmQueueConfigMountPath},
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("0.25"),
corev1.ResourceMemory: resource.MustParse("128Mi"),
},
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("512Mi"),
},
},
SecurityContext: &corev1.SecurityContext{
RunAsUser: &runAsUser,
RunAsNonRoot: &runAsNonRoot,
AllowPrivilegeEscalation: &[]bool{false}[0],
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
Add: []corev1.Capability{"NET_BIND_SERVICE"},
},
Privileged: &privileged,
SeccompProfile: &corev1.SeccompProfile{
Type: corev1.SeccompProfileTypeRuntimeDefault,
},
},
}
ss.Spec.Template.Spec.InitContainers = append(ss.Spec.Template.Spec.InitContainers, initContainer)
}

// CheckIfsmartstoreConfigMapUpdatedToPod checks if the smartstore configMap is updated on Pod or not
func CheckIfsmartstoreConfigMapUpdatedToPod(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) error {
reqLogger := log.FromContext(ctx)
Expand Down
4 changes: 4 additions & 0 deletions pkg/splunk/enterprise/clustermanager_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,7 @@ func TestApplyClusterManager(t *testing.T) {
{MetaName: "*v1.Secret-test-splunk-stack1-cluster-manager-secret-v1"},
{MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"},
{MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"},
{MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-queue-config"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"},
{MetaName: "*v4.ClusterManager-test-stack1"},
Expand All @@ -97,6 +98,7 @@ func TestApplyClusterManager(t *testing.T) {
{MetaName: "*v1.Secret-test-splunk-stack1-cluster-manager-secret-v1"},
{MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"},
{MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"},
{MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-queue-config"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"},
Expand Down Expand Up @@ -578,6 +580,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
{MetaName: "*v1.Secret-test-splunk-stack1-cluster-manager-secret-v1"},
{MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"},
{MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"},
{MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-queue-config"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"},
Expand All @@ -601,6 +604,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
{MetaName: "*v1.Secret-test-splunk-stack1-cluster-manager-secret-v1"},
{MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"},
{MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"},
{MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-queue-config"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"},
Expand Down
20 changes: 15 additions & 5 deletions pkg/splunk/enterprise/configuration.go
Original file line number Diff line number Diff line change
Expand Up @@ -880,17 +880,27 @@ func updateSplunkPodTemplateWithConfig(ctx context.Context, client splcommon.Con

smartstoreConfigMap := getSmartstoreConfigMap(ctx, client, cr, instanceType)
if smartstoreConfigMap != nil {
items := []corev1.KeyToPath{
{Key: "indexes.conf", Path: "indexes.conf", Mode: &configMapVolDefaultMode},
{Key: "server.conf", Path: "server.conf", Mode: &configMapVolDefaultMode},
{Key: configToken, Path: configToken, Mode: &configMapVolDefaultMode},
}
// When queue config keys are present (written by applyIdxcQueueConfigToCM),
// include them so the init container symlinks resolve correctly on the CM pod.
if _, ok := smartstoreConfigMap.Data["outputs.conf"]; ok {
items = append(items,
corev1.KeyToPath{Key: "outputs.conf", Path: "outputs.conf", Mode: &configMapVolDefaultMode},
corev1.KeyToPath{Key: "inputs.conf", Path: "inputs.conf", Mode: &configMapVolDefaultMode},
corev1.KeyToPath{Key: "default-mode.conf", Path: "default-mode.conf", Mode: &configMapVolDefaultMode},
)
}
addSplunkVolumeToTemplate(podTemplateSpec, "mnt-splunk-operator", "/mnt/splunk-operator/local/", corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: smartstoreConfigMap.GetName(),
},
DefaultMode: &configMapVolDefaultMode,
Items: []corev1.KeyToPath{
{Key: "indexes.conf", Path: "indexes.conf", Mode: &configMapVolDefaultMode},
{Key: "server.conf", Path: "server.conf", Mode: &configMapVolDefaultMode},
{Key: configToken, Path: configToken, Mode: &configMapVolDefaultMode},
},
Items: items,
},
})

Expand Down
106 changes: 72 additions & 34 deletions pkg/splunk/enterprise/indexercluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,57 @@ import (
// NewSplunkClientFunc function pointer type
type NewSplunkClientFunc func(managementURI, username, password string) *splclient.SplunkClient

// applyIdxcQueueConfigToCM builds and applies the ClusterManager queue config ConfigMap
// (splunk-<cmName>-clustermanager-queue-config) with outputs.conf, inputs.conf, and default-mode.conf.
// The CM bundle push infrastructure distributes these files to all indexer peers via manager-apps/
// splunk-operator/local/ — no pod-by-pod REST calls needed. A dedicated init container on the CM pod
// symlinks the conf files from the ConfigMap mount before Splunk starts.
// Returns (true, nil) when content changed (bundle push trigger needed), (false, nil) when unchanged.
func applyIdxcQueueConfigToCM(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster) (bool, error) {
	managerName := cr.Spec.ClusterManagerRef.Name
	namespace := cr.GetNamespace()

	// The ClusterManager CR is needed so its BundlePushTracker can be flagged on content change.
	manager := &enterpriseApi.ClusterManager{}
	if err := client.Get(ctx, types.NamespacedName{Name: managerName, Namespace: namespace}, manager); err != nil {
		return false, fmt.Errorf("applyIdxcQueueConfigToCM: failed to get ClusterManager %s: %w", managerName, err)
	}

	// Resolve the referenced Queue and ObjectStorage CRs for credentials and endpoints.
	cfg, err := ResolveQueueAndObjectStorage(ctx, client, cr, cr.Spec.QueueRef, cr.Spec.ObjectStorageRef, cr.Spec.ServiceAccount)
	if err != nil {
		return false, fmt.Errorf("applyIdxcQueueConfigToCM: failed to resolve queue/OS config: %w", err)
	}

	// Assemble the dedicated CM queue config ConfigMap payload.
	// outputs.conf and inputs.conf differ: outputs adds send_interval and encoding_format.
	// default-mode.conf uses isIndexer=true (no indexerPipe stanza).
	inputs, outputs := getQueueAndObjectStorageInputsForIndexerConfFiles(&cfg.Queue, &cfg.OS, cfg.AccessKey, cfg.SecretKey)
	confData := map[string]string{
		"app.conf":          generateQueueConfigAppConf("Splunk Operator ClusterManager Queue Config"),
		"outputs.conf":      buildQueueConfStanza(cfg.Queue.SQS.Name, outputs),
		"inputs.conf":       buildQueueConfStanza(cfg.Queue.SQS.Name, inputs),
		"default-mode.conf": generateIdxcDefaultModeConf(),
		"local.meta":        generateQueueConfigLocalMeta(),
	}

	changed, err := applyQueueConfigMap(ctx, client, GetCMQueueConfigMapName(managerName), namespace, manager, confData)
	if err != nil {
		return false, fmt.Errorf("applyIdxcQueueConfigToCM: failed to apply ConfigMap: %w", err)
	}
	if !changed {
		return false, nil
	}

	// Content changed: signal the ClusterManager to run a bundle push on its next reconcile.
	manager.Status.BundlePushTracker.NeedToPushManagerApps = true
	manager.Status.BundlePushTracker.LastCheckInterval = 0
	if err := client.Status().Update(ctx, manager); err != nil {
		return true, fmt.Errorf("applyIdxcQueueConfigToCM: failed to update ClusterManager BundlePushTracker: %w", err)
	}
	return true, nil
}

// ApplyIndexerClusterManager reconciles the state of a Splunk Enterprise indexer cluster.
func ApplyIndexerClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster) (reconcile.Result, error) {

Expand Down Expand Up @@ -159,6 +210,16 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
return result, err
}

// Apply queue config to ClusterManager's smartstore ConfigMap for bundle push distribution.
// Called unconditionally — ApplyConfigMap skips write when content unchanged.
if cr.Spec.QueueRef.Name != "" && cr.Spec.ClusterManagerRef.Name != "" {
_, err = applyIdxcQueueConfigToCM(ctx, client, cr)
if err != nil {
eventPublisher.Warning(ctx, "applyIdxcQueueConfigToCM", fmt.Sprintf("failed to apply queue config to ClusterManager: %s", err.Error()))
return result, err
}
}

// create or update statefulset for the indexers
statefulSet, err := getIndexerStatefulSet(ctx, client, cr)
if err != nil {
Expand Down Expand Up @@ -245,40 +306,6 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller

// no need to requeue if everything is ready
if cr.Status.Phase == enterpriseApi.PhaseReady {
qosCfg, err := ResolveQueueAndObjectStorage(ctx, client, cr, cr.Spec.QueueRef, cr.Spec.ObjectStorageRef, cr.Spec.ServiceAccount)
if err != nil {
scopedLog.Error(err, "Failed to resolve Queue/ObjectStorage config")
return result, err
}

secretChanged := cr.Status.CredentialSecretVersion != qosCfg.Version
serviceAccountChanged := cr.Status.ServiceAccount != cr.Spec.ServiceAccount

// If queue is updated
if cr.Spec.QueueRef.Name != "" {
if secretChanged || serviceAccountChanged {
mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client)
err = mgr.updateIndexerConfFiles(ctx, cr, &qosCfg.Queue, &qosCfg.OS, qosCfg.AccessKey, qosCfg.SecretKey, client)
if err != nil {
eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error()))
scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation")
return result, err
}

for i := int32(0); i < cr.Spec.Replicas; i++ {
idxcClient := mgr.getClient(ctx, i)
err = idxcClient.RestartSplunk()
if err != nil {
return result, err
}
scopedLog.Info("Restarted splunk", "indexer", i)
}

cr.Status.CredentialSecretVersion = qosCfg.Version
cr.Status.ServiceAccount = cr.Spec.ServiceAccount
}
}

//update MC
//Retrieve monitoring console ref from CM Spec
cmMonitoringConsoleConfigRef, err := RetrieveCMSpec(ctx, client, cr)
Expand Down Expand Up @@ -1383,6 +1410,17 @@ func getQueueAndPipelineInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec
return
}

// generateIdxcDefaultModeConf builds default-mode.conf INI content for an IndexerCluster peer.
// Uses getPipelineInputsForConfFile(true) — isIndexer=true omits the indexerPipe stanza.
// generateIdxcDefaultModeConf renders default-mode.conf INI content for an IndexerCluster peer.
// Calls getPipelineInputsForConfFile(true) — isIndexer=true omits the indexerPipe stanza.
func generateIdxcDefaultModeConf() string {
	var stanzas []string
	for _, entry := range getPipelineInputsForConfFile(true) {
		stanzas = append(stanzas, fmt.Sprintf("[%s]\n%s = %s\n\n", entry[0], entry[1], entry[2]))
	}
	return strings.Join(stanzas, "")
}

// Tells if there is an image migration from 8.x.x to 9.x.x
func imageUpdatedTo9(previousImage string, currentImage string) bool {
// If there is no colon, version can't be detected
Expand Down
Loading
Loading