diff --git a/.gitignore b/.gitignore index 4846768ad..913088c0e 100644 --- a/.gitignore +++ b/.gitignore @@ -92,6 +92,8 @@ clair-scanner-logs release-* deploy/olm-certified *junit.xml +# Test result XML files +*_unit_test.xml # Bias Language Linter .biaslanguage/ bin/ diff --git a/README.md b/README.md new file mode 100644 index 000000000..cf7af9c83 --- /dev/null +++ b/README.md @@ -0,0 +1,167 @@ +# Splunk Operator for Kubernetes + +[![License](https://img.shields.io/:license-apache-blue.svg)](http://www.apache.org/licenses/LICENSE-2.0.html) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/splunk/splunk-operator)](https://pkg.go.dev/github.com/splunk/splunk-operator) +[![Go Report Card](https://goreportcard.com/badge/github.com/splunk/splunk-operator)](https://goreportcard.com/report/github.com/splunk/splunk-operator) +[![Coverage Status](https://coveralls.io/repos/github/splunk/splunk-operator/badge.svg?branch=master)](https://coveralls.io/github/splunk/splunk-operator?branch=master) +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fsplunk%2Fsplunk-operator.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fsplunk%2Fsplunk-operator?ref=badge_shield) +[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/splunk/splunk-operator) + +The Splunk Operator for Kubernetes (SOK) makes it easy for Splunk +Administrators to deploy and operate Enterprise deployments in a Kubernetes +infrastructure. Packaged as a container, it uses the +[operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) +to manage Splunk-specific [custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/), +following best practices to manage all the underlying Kubernetes objects for you. + +This repository is used to build the Splunk +[Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) +for Kubernetes (SOK). 
If you are just looking for documentation on how to +deploy and use the latest release, please see the +[Getting Started Documentation](docs/README.md). + +## Splunk General Terms Acceptance + +Starting with operator version 3.0.0, which includes support for Splunk Enterprise version 10.x, an additional Docker-Splunk specific parameter is required to start containers. **This is a breaking change, and user action is required.** + +Starting in 10.x image versions of Splunk Enterprise, license acceptance requires an additional `SPLUNK_GENERAL_TERMS=--accept-sgt-current-at-splunk-com` argument. This indicates that users have read and accepted the current/latest version of the Splunk General Terms, available at https://www.splunk.com/en_us/legal/splunk-general-terms.html as may be updated from time to time. Unless you have jointly executed with Splunk a negotiated version of these General Terms that explicitly supersedes this agreement, by accessing or using Splunk software, you are agreeing to the Splunk General Terms posted at the time of your access and use and acknowledging its applicability to the Splunk software. Please read and make sure you agree to the Splunk General Terms before you access or use this software. Only after doing so should you include the `--accept-sgt-current-at-splunk-com` flag to indicate your acceptance of the current/latest Splunk General Terms and launch this software. All examples below have been updated with this change. + +If you use the below examples and the ‘--accept-sgt-current-at-splunk-com’ flag, you are indicating that you have read and accepted the current/latest version of the Splunk General Terms, as may be updated from time to time, and acknowledging its applicability to this software - as noted above. + +By default, the SPLUNK_GENERAL_TERMS environment variable will be set to an empty string. 
You must either manually update it to have the required additional value `--accept-sgt-current-at-splunk-com` in the splunk-operator-controller-manager deployment, or you can pass the `SPLUNK_GENERAL_TERMS` parameter with the required additional value to the `make deploy` command. + +``` +make deploy IMG=docker.io/splunk/splunk-operator:<version> WATCH_NAMESPACE="namespace1" RELATED_IMAGE_SPLUNK_ENTERPRISE="splunk/splunk:edge" SPLUNK_GENERAL_TERMS="--accept-sgt-current-at-splunk-com" +``` + +For more information about this change, see the [Splunk General Terms Migration Documentation](docs/SplunkGeneralTermsMigration.md). + +## Prerequisites + +You must have [Docker Engine](https://docs.docker.com/install/) installed to +build the Splunk Operator. + +This project uses [Go modules](https://blog.golang.org/using-go-modules), +and requires [golang](https://golang.org/doc/install) 1.23.0 or later. +You must `export GO111MODULE=on` if cloning these repositories into your +`$GOPATH` (not recommended). + +The [Kubernetes Operator SDK](https://github.com/operator-framework/operator-sdk) +must also be installed to build this project. 
+ +``` +git clone -b v1.31.0 https://github.com/operator-framework/operator-sdk +cd operator-sdk +make tidy +make install +``` + +You may need to add `$GOPATH/bin` to your path to run the `operator-sdk` +command line tool: + +``` +export PATH=${PATH}:${GOPATH}/bin +``` + +It is also recommended that you install the following golang tools, +which are used by various `make` targets: + +```shell +go install golang.org/x/lint/golint +go install golang.org/x/tools/cmd/cover +go install github.com/mattn/goveralls +go get -u github.com/mikefarah/yq/v3 +go get -u github.com/go-delve/delve/cmd/dlv +``` + +## Cloning this repository + +```shell +git clone git@github.com:splunk/splunk-operator.git +cd splunk-operator +``` + +## Repository overview + +This repository consists of the following code used to build the splunk-operator binary: + +* `main.go`: Provides the main() function, where everything begins +* `apis/`: Source code for the operator's custom resource definition types +* `controllers/`: Used to register controllers that watch for changes to custom resources +* `pkg/splunk/enterprise/`: Source code for controllers that manage Splunk Enterprise resources +* `pkg/splunk/controller/`: Common code shared across Splunk controllers +* `pkg/splunk/common/`: Common code used by most other splunk packages +* `pkg/splunk/client/`: Simple client for Splunk Enterprise REST API +* `pkg/splunk/test/`: Common code used by other packages for unit testing + +`main()` uses `controllers` to register all the `enterprise` controllers +that manage custom resources by watching for Kubernetes events. +The `enterprise` controllers are implemented using common code provided +by the `controllers` package. The `enterprise` controllers also use the REST API client +provided in the `pkg/splunk/client` package. The types provided by `apis/` and +common code in the `pkg/splunk/common/` package are used universally. 
Note that the +source code for `main()` is generated from a template provided by the Operator SDK. + +In addition to the source code, this repository includes: + +* `tools`: Build scripts, templates, etc. used to build the container image +* `config`: Kubernetes YAML templates used to install the Splunk Operator +* `docs`: Getting Started Guide and other documentation in Markdown format +* `test`: Integration test framework built using Ginkgo. See [docs](test/README.md) for more info. + +## Building the operator + +You can build the operator by just running `make`. + +Other make targets include (more info below): + +* `make all`: builds `manager` executable +* `make test`: Runs unit tests with Coveralls code coverage output to coverage.out +* `make scorecard`: Runs operator-sdk scorecard tests using OLM installation bundle +* `make generate`: runs operator-generate k8s, crds and csv commands, updating installation YAML files and OLM bundle +* `make docker-build`: generates `splunk-operator` container image example `make docker-build IMG=docker.io/splunk/splunk-operator:<version>` +* `make docker-buildx`: generates `splunk-operator` container image for multiple platforms, example `make docker-buildx IMG=docker.io/splunk/splunk-operator:<version>` +* `make docker-push`: push docker image to given repository example `make docker-push IMG=docker.io/splunk/splunk-operator:<version>` +* `make clean`: removes the binary build output and `splunk-operator` container image example `make clean IMG=docker.io/splunk/splunk-operator:<version>` +* `make run`: runs the Splunk Operator locally, monitoring the Kubernetes cluster configured in your current `kubectl` context +* `make fmt`: runs `go fmt` on all `*.go` source files in this project +* `make bundle-build`: generates `splunk-operator-bundle` bundle container image for OLM example `make bundle-build IMAGE_TAG_BASE=docker.io/splunk/splunk-operator VERSION=<version> IMG=docker.io/splunk/splunk-operator:<version>` +* `make bundle-push`: push OLM bundle docker image to given 
repository example `make bundle-push IMAGE_TAG_BASE=docker.io/splunk/splunk-operator VERSION=<version> IMG=docker.io/splunk/splunk-operator:<version>` +* `make catalog-build`: generates `splunk-operator-catalog` catalog container image example `make catalog-build IMAGE_TAG_BASE=docker.io/splunk/splunk-operator VERSION=<version> IMG=docker.io/splunk/splunk-operator:<version>` +* `make catalog-push`: push catalog docker image to given repository example `make catalog-push IMAGE_TAG_BASE=docker.io/splunk/splunk-operator VERSION=<version> IMG=docker.io/splunk/splunk-operator:<version>` + +## Deploying the Splunk Operator +`make deploy` command will deploy all the necessary resources to run Splunk Operator like RBAC policies, services, configmaps, deployment. Operator will be installed in `splunk-operator` namespace. If `splunk-operator` namespace does not exist, it will create the namespace. By default `make deploy` will install operator clusterwide. Operator will watch all the namespaces for any splunk enterprise custom resources. + +```shell +make deploy IMG=docker.io/splunk/splunk-operator:<version> +``` + +If you want the operator to watch a specific namespace, then you must pass the `WATCH_NAMESPACE` parameter to the `make deploy` command + +``` +make deploy IMG=docker.io/splunk/splunk-operator:<version> WATCH_NAMESPACE="namespace1" +``` + +If you want the operator to use a specific version of the Splunk instance, then you must pass the `RELATED_IMAGE_SPLUNK_ENTERPRISE` parameter to the `make deploy` command + +``` +make deploy IMG=docker.io/splunk/splunk-operator:<version> WATCH_NAMESPACE="namespace1" RELATED_IMAGE_SPLUNK_ENTERPRISE="splunk/splunk:edge" +``` + +Use this to run the operator as a local foreground process on your machine: + +```shell +make run +``` + +This will use your current Kubernetes context from `~/.kube/config` to manage +resources in your current namespace. + +Please see the [Getting Started Documentation](docs/README.md) for more +information, including instructions on how to install the operator in your +cluster. 
+ + +## License +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fsplunk%2Fsplunk-operator.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fsplunk%2Fsplunk-operator?ref=badge_large) \ No newline at end of file diff --git a/api/platform/v4/groupversion_info.go b/api/platform/v4/groupversion_info.go new file mode 100644 index 000000000..17832d272 --- /dev/null +++ b/api/platform/v4/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v4 contains API Schema definitions for the platform v4 API group. +// +kubebuilder:object:generate=true +// +groupName=platform.splunk.com +package v4 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "platform.splunk.com", Version: "v4"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/platform/v4/platformconfig_types.go b/api/platform/v4/platformconfig_types.go new file mode 100644 index 000000000..eb25ededc --- /dev/null +++ b/api/platform/v4/platformconfig_types.go @@ -0,0 +1,242 @@ +/* +Copyright 2021. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PlatformConfigSpec defines the desired state of PlatformConfig. +type PlatformConfigSpec struct { + // Secrets configuration for secret management. + // +optional + Secrets SecretConfig `json:"secrets,omitempty"` + + // Certificates configuration for TLS certificate management. + // +optional + Certificates CertificatesConfig `json:"certificates,omitempty"` +} + +// SecretConfig configures secret management. +type SecretConfig struct { + // Provider specifies the secret provider to use. + // Valid values: "kubernetes", "external-secrets", "csi" + // Default: "kubernetes" + // +optional + // +kubebuilder:validation:Enum=kubernetes;external-secrets;csi + Provider string `json:"provider,omitempty"` + + // GenerateFallback indicates whether to generate fallback secrets. + // Only for development environments. + // Default: false + // +optional + GenerateFallback bool `json:"generateFallback,omitempty"` + + // VersioningEnabled enables versioned secrets for Splunk resources. + // Default: true + // +optional + VersioningEnabled bool `json:"versioningEnabled,omitempty"` + + // VersionsToKeep is how many versions to keep. + // Default: 3 + // +optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=10 + VersionsToKeep int `json:"versionsToKeep,omitempty"` + + // CSI configures CSI-based secret management. 
+ // Only used when Provider is "csi". + // +optional + CSI *CSISecretConfig `json:"csi,omitempty"` +} + +// CertificatesConfig configures TLS certificate management. +type CertificatesConfig struct { + // Provider specifies the certificate provider to use. + // Valid values: "user-provided", "cert-manager" + // Default: "cert-manager" + // +optional + // +kubebuilder:validation:Enum=user-provided;cert-manager + Provider string `json:"provider,omitempty"` + + // Naming configures certificate naming pattern. + // Used to construct certificate secret names for each service instance. + // +optional + Naming *CertificateNamingConfig `json:"naming,omitempty"` + + // UserProvided configures user-provided certificates. + // Only used when Provider is "user-provided". + // +optional + UserProvided *UserProvidedCertificates `json:"userProvided,omitempty"` + + // CertManager configures cert-manager integration. + // Only used when Provider is "cert-manager". + // +optional + CertManager *CertManagerConfig `json:"certManager,omitempty"` +} + +// CertificateNamingConfig configures certificate naming pattern. +type CertificateNamingConfig struct { + // Pattern is the naming pattern for certificate secrets. + // Supports variable substitution: + // - ${namespace}: Kubernetes namespace + // - ${service}: Service type (e.g., "standalone", "clustermanager") + // - ${instance}: Instance name (e.g., "my-splunk") + // Example: "${service}-${instance}-tls" -> "standalone-my-splunk-tls" + // Default: "${service}-${instance}-tls" + // +optional + // +kubebuilder:default="${service}-${instance}-tls" + Pattern string `json:"pattern,omitempty"` +} + +// UserProvidedCertificates specifies user-provided certificate configuration. +type UserProvidedCertificates struct { + // Pattern overrides the global naming pattern for user-provided certificates. + // If not specified, uses the pattern from CertificatesConfig.Naming. 
+ // +optional + Pattern string `json:"pattern,omitempty"` + + // Namespace is the namespace where the certificate secrets are located. + // If not specified, uses the same namespace as the resource. + // +optional + Namespace string `json:"namespace,omitempty"` +} + +// CertManagerConfig configures cert-manager integration. +type CertManagerConfig struct { + // Enabled indicates whether cert-manager integration is enabled. + // Default: true + // +optional + Enabled bool `json:"enabled,omitempty"` + + // IssuerName is the name of the cert-manager Issuer or ClusterIssuer. + // +optional + IssuerName string `json:"issuerName,omitempty"` + + // IssuerKind is the kind of issuer (Issuer or ClusterIssuer). + // Default: "ClusterIssuer" + // +optional + // +kubebuilder:validation:Enum=Issuer;ClusterIssuer + IssuerKind string `json:"issuerKind,omitempty"` +} + +// CSISecretConfig configures CSI-based secret management. +type CSISecretConfig struct { + // Driver is the CSI driver to use. + // Default: "secrets-store.csi.k8s.io" + // +optional + // +kubebuilder:default="secrets-store.csi.k8s.io" + Driver string `json:"driver,omitempty"` + + // DefaultProvider is the default CSI provider. + // Valid values: "vault", "aws", "azure", "gcp" + // +optional + // +kubebuilder:validation:Enum=vault;aws;azure;gcp + DefaultProvider string `json:"defaultProvider,omitempty"` + + // Naming configures SecretProviderClass naming pattern. + // +optional + Naming *SecretNamingConfig `json:"naming,omitempty"` + + // MountPath is the default mount path for CSI secrets. + // Default: "/mnt/secrets" + // +optional + // +kubebuilder:default="/mnt/secrets" + MountPath string `json:"mountPath,omitempty"` + + // Vault configures Vault-specific settings. + // Only used when DefaultProvider is "vault". + // +optional + Vault *VaultConfig `json:"vault,omitempty"` + + // AWS configures AWS Secrets Manager settings. + // Only used when DefaultProvider is "aws". 
+ // +optional + AWS *AWSSecretsConfig `json:"aws,omitempty"` +} + +// SecretNamingConfig configures secret naming pattern for CSI. +type SecretNamingConfig struct { + // Pattern is the naming pattern for SecretProviderClass resources. + // Supports variable substitution: + // - ${namespace}: Kubernetes namespace + // - ${service}: Service type (e.g., "standalone", "clustermanager") + // - ${instance}: Instance name (e.g., "my-splunk") + // Example: "${service}-${instance}-secrets" -> "standalone-my-splunk-secrets" + // Default: "${service}-${instance}-secrets" + // +optional + // +kubebuilder:default="${service}-${instance}-secrets" + Pattern string `json:"pattern,omitempty"` +} + +// VaultConfig configures Vault CSI provider. +type VaultConfig struct { + // Address is the Vault server address. + // Example: "https://vault.company.com" + // +optional + Address string `json:"address,omitempty"` + + // Role is the Vault role to use for authentication. + // +optional + Role string `json:"role,omitempty"` +} + +// AWSSecretsConfig configures AWS Secrets Manager CSI provider. +type AWSSecretsConfig struct { + // Region is the AWS region. + // Example: "us-west-2" + // +optional + Region string `json:"region,omitempty"` +} + +// PlatformConfigStatus defines the observed state of PlatformConfig. +type PlatformConfigStatus struct { + // Conditions represent the latest available observations of the config's state. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // ObservedGeneration is the generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:storageversion + +// PlatformConfig is the Schema for the platformconfigs API. 
+type PlatformConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PlatformConfigSpec `json:"spec,omitempty"` + Status PlatformConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PlatformConfigList contains a list of PlatformConfig. +type PlatformConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PlatformConfig `json:"items"` +} + +func init() { + SchemeBuilder.Register(&PlatformConfig{}, &PlatformConfigList{}) +} diff --git a/api/platform/v4/tenantconfig_types.go b/api/platform/v4/tenantconfig_types.go new file mode 100644 index 000000000..65339b8b5 --- /dev/null +++ b/api/platform/v4/tenantconfig_types.go @@ -0,0 +1,79 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TenantConfigSpec defines namespace-specific configuration overrides for the Platform SDK. +// +// Tenant admins create TenantConfigs to override PlatformConfig defaults for their namespace. +// This enables multi-tenancy with different settings per tenant. +type TenantConfigSpec struct { + // Secrets configuration overrides. + // Overrides the cluster-wide PlatformConfig secret settings for this namespace. + // +optional + Secrets SecretConfig `json:"secrets,omitempty"` + + // Certificates configuration overrides. 
+ // Overrides the cluster-wide PlatformConfig certificate settings for this namespace. + // +optional + Certificates CertificatesConfig `json:"certificates,omitempty"` +} + +// TenantConfigStatus defines the observed state of TenantConfig. +type TenantConfigStatus struct { + // Conditions represent the latest available observations of the config's state. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // ObservedGeneration is the generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Namespaced,shortName=tc +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Observed Generation",type="integer",JSONPath=".status.observedGeneration" + +// TenantConfig is the Schema for the tenantconfigs API. +// +// TenantConfig defines namespace-specific overrides for the Platform SDK. +// This enables multi-tenancy by allowing different settings per namespace. +type TenantConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec TenantConfigSpec `json:"spec,omitempty"` + Status TenantConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TenantConfigList contains a list of TenantConfig. +type TenantConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TenantConfig `json:"items"` +} + +func init() { + SchemeBuilder.Register(&TenantConfig{}, &TenantConfigList{}) +} diff --git a/api/platform/v4/zz_generated.deepcopy.go b/api/platform/v4/zz_generated.deepcopy.go new file mode 100644 index 000000000..746bb1d83 --- /dev/null +++ b/api/platform/v4/zz_generated.deepcopy.go @@ -0,0 +1,392 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2021. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v4 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSSecretsConfig) DeepCopyInto(out *AWSSecretsConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSecretsConfig. +func (in *AWSSecretsConfig) DeepCopy() *AWSSecretsConfig { + if in == nil { + return nil + } + out := new(AWSSecretsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISecretConfig) DeepCopyInto(out *CSISecretConfig) { + *out = *in + if in.Naming != nil { + in, out := &in.Naming, &out.Naming + *out = new(SecretNamingConfig) + **out = **in + } + if in.Vault != nil { + in, out := &in.Vault, &out.Vault + *out = new(VaultConfig) + **out = **in + } + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSSecretsConfig) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISecretConfig. 
+func (in *CSISecretConfig) DeepCopy() *CSISecretConfig { + if in == nil { + return nil + } + out := new(CSISecretConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertManagerConfig) DeepCopyInto(out *CertManagerConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertManagerConfig. +func (in *CertManagerConfig) DeepCopy() *CertManagerConfig { + if in == nil { + return nil + } + out := new(CertManagerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateNamingConfig) DeepCopyInto(out *CertificateNamingConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateNamingConfig. +func (in *CertificateNamingConfig) DeepCopy() *CertificateNamingConfig { + if in == nil { + return nil + } + out := new(CertificateNamingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificatesConfig) DeepCopyInto(out *CertificatesConfig) { + *out = *in + if in.Naming != nil { + in, out := &in.Naming, &out.Naming + *out = new(CertificateNamingConfig) + **out = **in + } + if in.UserProvided != nil { + in, out := &in.UserProvided, &out.UserProvided + *out = new(UserProvidedCertificates) + **out = **in + } + if in.CertManager != nil { + in, out := &in.CertManager, &out.CertManager + *out = new(CertManagerConfig) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificatesConfig. 
+func (in *CertificatesConfig) DeepCopy() *CertificatesConfig { + if in == nil { + return nil + } + out := new(CertificatesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformConfig) DeepCopyInto(out *PlatformConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformConfig. +func (in *PlatformConfig) DeepCopy() *PlatformConfig { + if in == nil { + return nil + } + out := new(PlatformConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlatformConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformConfigList) DeepCopyInto(out *PlatformConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PlatformConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformConfigList. +func (in *PlatformConfigList) DeepCopy() *PlatformConfigList { + if in == nil { + return nil + } + out := new(PlatformConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PlatformConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformConfigSpec) DeepCopyInto(out *PlatformConfigSpec) { + *out = *in + in.Secrets.DeepCopyInto(&out.Secrets) + in.Certificates.DeepCopyInto(&out.Certificates) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformConfigSpec. +func (in *PlatformConfigSpec) DeepCopy() *PlatformConfigSpec { + if in == nil { + return nil + } + out := new(PlatformConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformConfigStatus) DeepCopyInto(out *PlatformConfigStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformConfigStatus. +func (in *PlatformConfigStatus) DeepCopy() *PlatformConfigStatus { + if in == nil { + return nil + } + out := new(PlatformConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretConfig) DeepCopyInto(out *SecretConfig) { + *out = *in + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + *out = new(CSISecretConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretConfig. 
+func (in *SecretConfig) DeepCopy() *SecretConfig { + if in == nil { + return nil + } + out := new(SecretConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretNamingConfig) DeepCopyInto(out *SecretNamingConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretNamingConfig. +func (in *SecretNamingConfig) DeepCopy() *SecretNamingConfig { + if in == nil { + return nil + } + out := new(SecretNamingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantConfig) DeepCopyInto(out *TenantConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantConfig. +func (in *TenantConfig) DeepCopy() *TenantConfig { + if in == nil { + return nil + } + out := new(TenantConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TenantConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TenantConfigList) DeepCopyInto(out *TenantConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TenantConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantConfigList. +func (in *TenantConfigList) DeepCopy() *TenantConfigList { + if in == nil { + return nil + } + out := new(TenantConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TenantConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantConfigSpec) DeepCopyInto(out *TenantConfigSpec) { + *out = *in + in.Secrets.DeepCopyInto(&out.Secrets) + in.Certificates.DeepCopyInto(&out.Certificates) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantConfigSpec. +func (in *TenantConfigSpec) DeepCopy() *TenantConfigSpec { + if in == nil { + return nil + } + out := new(TenantConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantConfigStatus) DeepCopyInto(out *TenantConfigStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantConfigStatus. 
+func (in *TenantConfigStatus) DeepCopy() *TenantConfigStatus { + if in == nil { + return nil + } + out := new(TenantConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserProvidedCertificates) DeepCopyInto(out *UserProvidedCertificates) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserProvidedCertificates. +func (in *UserProvidedCertificates) DeepCopy() *UserProvidedCertificates { + if in == nil { + return nil + } + out := new(UserProvidedCertificates) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultConfig) DeepCopyInto(out *VaultConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultConfig. +func (in *VaultConfig) DeepCopy() *VaultConfig { + if in == nil { + return nil + } + out := new(VaultConfig) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/bases/platform.splunk.com_platformconfigs.yaml b/config/crd/bases/platform.splunk.com_platformconfigs.yaml new file mode 100644 index 000000000..42e8f7413 --- /dev/null +++ b/config/crd/bases/platform.splunk.com_platformconfigs.yaml @@ -0,0 +1,284 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: platformconfigs.platform.splunk.com +spec: + group: platform.splunk.com + names: + kind: PlatformConfig + listKind: PlatformConfigList + plural: platformconfigs + singular: platformconfig + scope: Cluster + versions: + - name: v4 + schema: + openAPIV3Schema: + description: PlatformConfig is the Schema for the platformconfigs API. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PlatformConfigSpec defines the desired state of PlatformConfig. + properties: + certificates: + description: Certificates configuration for TLS certificate management. + properties: + certManager: + description: |- + CertManager configures cert-manager integration. + Only used when Provider is "cert-manager". + properties: + enabled: + description: |- + Enabled indicates whether cert-manager integration is enabled. + Default: true + type: boolean + issuerKind: + description: |- + IssuerKind is the kind of issuer (Issuer or ClusterIssuer). + Default: "ClusterIssuer" + enum: + - Issuer + - ClusterIssuer + type: string + issuerName: + description: IssuerName is the name of the cert-manager Issuer + or ClusterIssuer. + type: string + type: object + naming: + description: |- + Naming configures certificate naming pattern. + Used to construct certificate secret names for each service instance. + properties: + pattern: + default: ${service}-${instance}-tls + description: |- + Pattern is the naming pattern for certificate secrets. 
+ Supports variable substitution: + - ${namespace}: Kubernetes namespace + - ${service}: Service type (e.g., "standalone", "clustermanager") + - ${instance}: Instance name (e.g., "my-splunk") + Example: "${service}-${instance}-tls" -> "standalone-my-splunk-tls" + Default: "${service}-${instance}-tls" + type: string + type: object + provider: + description: |- + Provider specifies the certificate provider to use. + Valid values: "user-provided", "cert-manager" + Default: "cert-manager" + enum: + - user-provided + - cert-manager + type: string + userProvided: + description: |- + UserProvided configures user-provided certificates. + Only used when Provider is "user-provided". + properties: + namespace: + description: |- + Namespace is the namespace where the certificate secrets are located. + If not specified, uses the same namespace as the resource. + type: string + pattern: + description: |- + Pattern overrides the global naming pattern for user-provided certificates. + If not specified, uses the pattern from CertificatesConfig.Naming. + type: string + type: object + type: object + secrets: + description: Secrets configuration for secret management. + properties: + csi: + description: |- + CSI configures CSI-based secret management. + Only used when Provider is "csi". + properties: + aws: + description: |- + AWS configures AWS Secrets Manager settings. + Only used when DefaultProvider is "aws". + properties: + region: + description: |- + Region is the AWS region. + Example: "us-west-2" + type: string + type: object + defaultProvider: + description: |- + DefaultProvider is the default CSI provider. + Valid values: "vault", "aws", "azure", "gcp" + enum: + - vault + - aws + - azure + - gcp + type: string + driver: + default: secrets-store.csi.k8s.io + description: |- + Driver is the CSI driver to use. + Default: "secrets-store.csi.k8s.io" + type: string + mountPath: + default: /mnt/secrets + description: |- + MountPath is the default mount path for CSI secrets. 
+ Default: "/mnt/secrets" + type: string + naming: + description: Naming configures SecretProviderClass naming + pattern. + properties: + pattern: + default: ${service}-${instance}-secrets + description: |- + Pattern is the naming pattern for SecretProviderClass resources. + Supports variable substitution: + - ${namespace}: Kubernetes namespace + - ${service}: Service type (e.g., "standalone", "clustermanager") + - ${instance}: Instance name (e.g., "my-splunk") + Example: "${service}-${instance}-secrets" -> "standalone-my-splunk-secrets" + Default: "${service}-${instance}-secrets" + type: string + type: object + vault: + description: |- + Vault configures Vault-specific settings. + Only used when DefaultProvider is "vault". + properties: + address: + description: |- + Address is the Vault server address. + Example: "https://vault.company.com" + type: string + role: + description: Role is the Vault role to use for authentication. + type: string + type: object + type: object + generateFallback: + description: |- + GenerateFallback indicates whether to generate fallback secrets. + Only for development environments. + Default: false + type: boolean + provider: + description: |- + Provider specifies the secret provider to use. + Valid values: "kubernetes", "external-secrets", "csi" + Default: "kubernetes" + enum: + - kubernetes + - external-secrets + - csi + type: string + versioningEnabled: + description: |- + VersioningEnabled enables versioned secrets for Splunk resources. + Default: true + type: boolean + versionsToKeep: + description: |- + VersionsToKeep is how many versions to keep. + Default: 3 + maximum: 10 + minimum: 1 + type: integer + type: object + type: object + status: + description: PlatformConfigStatus defines the observed state of PlatformConfig. + properties: + conditions: + description: Conditions represent the latest available observations + of the config's state. 
+ items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + observedGeneration: + description: ObservedGeneration is the generation observed by the + controller. + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/platform.splunk.com_tenantconfigs.yaml b/config/crd/bases/platform.splunk.com_tenantconfigs.yaml new file mode 100644 index 000000000..50adc1150 --- /dev/null +++ b/config/crd/bases/platform.splunk.com_tenantconfigs.yaml @@ -0,0 +1,305 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: tenantconfigs.platform.splunk.com +spec: + group: platform.splunk.com + names: + kind: TenantConfig + listKind: TenantConfigList + plural: tenantconfigs + shortNames: + - tc + singular: tenantconfig + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.observedGeneration + name: Observed Generation + type: integer + name: v4 + schema: + openAPIV3Schema: + description: |- + TenantConfig is the Schema for the tenantconfigs API. + + TenantConfig defines namespace-specific overrides for the Platform SDK. + This enables multi-tenancy by allowing different settings per namespace. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + TenantConfigSpec defines namespace-specific configuration overrides for the Platform SDK. + + Tenant admins create TenantConfigs to override PlatformConfig defaults for their namespace. + This enables multi-tenancy with different settings per tenant. + properties: + certificates: + description: |- + Certificates configuration overrides. + Overrides the cluster-wide PlatformConfig certificate settings for this namespace. + properties: + certManager: + description: |- + CertManager configures cert-manager integration. + Only used when Provider is "cert-manager". + properties: + enabled: + description: |- + Enabled indicates whether cert-manager integration is enabled. + Default: true + type: boolean + issuerKind: + description: |- + IssuerKind is the kind of issuer (Issuer or ClusterIssuer). + Default: "ClusterIssuer" + enum: + - Issuer + - ClusterIssuer + type: string + issuerName: + description: IssuerName is the name of the cert-manager Issuer + or ClusterIssuer. + type: string + type: object + naming: + description: |- + Naming configures certificate naming pattern. + Used to construct certificate secret names for each service instance. + properties: + pattern: + default: ${service}-${instance}-tls + description: |- + Pattern is the naming pattern for certificate secrets. 
+ Supports variable substitution: + - ${namespace}: Kubernetes namespace + - ${service}: Service type (e.g., "standalone", "clustermanager") + - ${instance}: Instance name (e.g., "my-splunk") + Example: "${service}-${instance}-tls" -> "standalone-my-splunk-tls" + Default: "${service}-${instance}-tls" + type: string + type: object + provider: + description: |- + Provider specifies the certificate provider to use. + Valid values: "user-provided", "cert-manager" + Default: "cert-manager" + enum: + - user-provided + - cert-manager + type: string + userProvided: + description: |- + UserProvided configures user-provided certificates. + Only used when Provider is "user-provided". + properties: + namespace: + description: |- + Namespace is the namespace where the certificate secrets are located. + If not specified, uses the same namespace as the resource. + type: string + pattern: + description: |- + Pattern overrides the global naming pattern for user-provided certificates. + If not specified, uses the pattern from CertificatesConfig.Naming. + type: string + type: object + type: object + secrets: + description: |- + Secrets configuration overrides. + Overrides the cluster-wide PlatformConfig secret settings for this namespace. + properties: + csi: + description: |- + CSI configures CSI-based secret management. + Only used when Provider is "csi". + properties: + aws: + description: |- + AWS configures AWS Secrets Manager settings. + Only used when DefaultProvider is "aws". + properties: + region: + description: |- + Region is the AWS region. + Example: "us-west-2" + type: string + type: object + defaultProvider: + description: |- + DefaultProvider is the default CSI provider. + Valid values: "vault", "aws", "azure", "gcp" + enum: + - vault + - aws + - azure + - gcp + type: string + driver: + default: secrets-store.csi.k8s.io + description: |- + Driver is the CSI driver to use. 
+ Default: "secrets-store.csi.k8s.io" + type: string + mountPath: + default: /mnt/secrets + description: |- + MountPath is the default mount path for CSI secrets. + Default: "/mnt/secrets" + type: string + naming: + description: Naming configures SecretProviderClass naming + pattern. + properties: + pattern: + default: ${service}-${instance}-secrets + description: |- + Pattern is the naming pattern for SecretProviderClass resources. + Supports variable substitution: + - ${namespace}: Kubernetes namespace + - ${service}: Service type (e.g., "standalone", "clustermanager") + - ${instance}: Instance name (e.g., "my-splunk") + Example: "${service}-${instance}-secrets" -> "standalone-my-splunk-secrets" + Default: "${service}-${instance}-secrets" + type: string + type: object + vault: + description: |- + Vault configures Vault-specific settings. + Only used when DefaultProvider is "vault". + properties: + address: + description: |- + Address is the Vault server address. + Example: "https://vault.company.com" + type: string + role: + description: Role is the Vault role to use for authentication. + type: string + type: object + type: object + generateFallback: + description: |- + GenerateFallback indicates whether to generate fallback secrets. + Only for development environments. + Default: false + type: boolean + provider: + description: |- + Provider specifies the secret provider to use. + Valid values: "kubernetes", "external-secrets", "csi" + Default: "kubernetes" + enum: + - kubernetes + - external-secrets + - csi + type: string + versioningEnabled: + description: |- + VersioningEnabled enables versioned secrets for Splunk resources. + Default: true + type: boolean + versionsToKeep: + description: |- + VersionsToKeep is how many versions to keep. + Default: 3 + maximum: 10 + minimum: 1 + type: integer + type: object + type: object + status: + description: TenantConfigStatus defines the observed state of TenantConfig. 
+ properties: + conditions: + description: Conditions represent the latest available observations + of the config's state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + observedGeneration: + description: ObservedGeneration is the generation observed by the + controller. + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 8cf491ac3..15c98e24a 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -1,6 +1,4 @@ # Adds namespace to all resources. -# Cluster-scoped deployment WITHOUT webhook (default) -# To enable webhook, use config/default-with-webhook overlay namespace: splunk-operator # Value of this field is prepended to the @@ -20,29 +18,33 @@ bases: - ../persistent-volume - ../service - ../manager -# [WEBHOOK] To enable webhook, use config/default-with-webhook overlay +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml #- ../webhook -# [CERTMANAGER] Required for webhook TLS +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. #- ../certmanager # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. #- ../prometheus # [METRICS] Expose the controller manager metrics service. - metrics_service.yaml -# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy. -# Only Pod(s) running a namespace labeled with 'metrics: enabled' will be able to gather the metrics. -# Only CR(s) which requires webhooks and are applied on namespaces labeled with 'webhooks: enabled' will -# be able to communicate with the Webhook Server. 
-#- ../network-policy -# [WEBHOOK] To enable webhook, use config/default-with-webhook overlay -#patchesStrategicMerge: +patchesStrategicMerge: +# Mount the controller config file for loading manager configurations +# through a ComponentConfig type +#- manager_config_patch.yaml + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml #- manager_webhook_patch.yaml -# [CERTMANAGER] Enabled for CA injection in the admission webhooks + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. +# 'CERTMANAGER' needs to be enabled to use ca injection #- webhookcainjection_patch.yaml # the following config is for teaching kustomize how to do var substitution -# [CERTMANAGER] Variables for cert-manager CA injection -#vars: +vars: +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. #- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR # objref: # kind: Certificate @@ -132,10 +134,4 @@ patches: # More info: https://book.kubebuilder.io/reference/metrics - path: manager_metrics_patch.yaml target: - kind: Deployment - -# [WEBHOOK] To enable webhook, use config/default-with-webhook overlay -#- path: manager_webhook_patch.yaml -# target: -# kind: Deployment - + kind: Deployment \ No newline at end of file diff --git a/config/rbac/platform_platformconfig_admin_role.yaml b/config/rbac/platform_platformconfig_admin_role.yaml new file mode 100644 index 000000000..8efb63e5a --- /dev/null +++ b/config/rbac/platform_platformconfig_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over platform.splunk.com. 
+# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: platform-platformconfig-admin-role +rules: +- apiGroups: + - platform.splunk.com + resources: + - platformconfigs + verbs: + - '*' +- apiGroups: + - platform.splunk.com + resources: + - platformconfigs/status + verbs: + - get diff --git a/config/rbac/platform_platformconfig_editor_role.yaml b/config/rbac/platform_platformconfig_editor_role.yaml new file mode 100644 index 000000000..48f4c4cba --- /dev/null +++ b/config/rbac/platform_platformconfig_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the platform.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: platform-platformconfig-editor-role +rules: +- apiGroups: + - platform.splunk.com + resources: + - platformconfigs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - platform.splunk.com + resources: + - platformconfigs/status + verbs: + - get diff --git a/config/rbac/platform_platformconfig_viewer_role.yaml b/config/rbac/platform_platformconfig_viewer_role.yaml new file mode 100644 index 000000000..bf381827e --- /dev/null +++ b/config/rbac/platform_platformconfig_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to platform.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: platform-platformconfig-viewer-role +rules: +- apiGroups: + - platform.splunk.com + resources: + - platformconfigs + verbs: + - get + - list + - watch +- apiGroups: + - platform.splunk.com + resources: + - platformconfigs/status + verbs: + - get diff --git a/config/rbac/platform_tenantconfig_admin_role.yaml b/config/rbac/platform_tenantconfig_admin_role.yaml new file mode 100644 index 000000000..a38fcf4ad --- /dev/null +++ b/config/rbac/platform_tenantconfig_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over platform.splunk.com. 
+# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: platform-tenantconfig-admin-role +rules: +- apiGroups: + - platform.splunk.com + resources: + - tenantconfigs + verbs: + - '*' +- apiGroups: + - platform.splunk.com + resources: + - tenantconfigs/status + verbs: + - get diff --git a/config/rbac/platform_tenantconfig_editor_role.yaml b/config/rbac/platform_tenantconfig_editor_role.yaml new file mode 100644 index 000000000..81bb78a75 --- /dev/null +++ b/config/rbac/platform_tenantconfig_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the platform.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: platform-tenantconfig-editor-role +rules: +- apiGroups: + - platform.splunk.com + resources: + - tenantconfigs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - platform.splunk.com + resources: + - tenantconfigs/status + verbs: + - get diff --git a/config/rbac/platform_tenantconfig_viewer_role.yaml b/config/rbac/platform_tenantconfig_viewer_role.yaml new file mode 100644 index 000000000..9738b820b --- /dev/null +++ b/config/rbac/platform_tenantconfig_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project splunk-operator itself. 
+# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to platform.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: platform-tenantconfig-viewer-role +rules: +- apiGroups: + - platform.splunk.com + resources: + - tenantconfigs + verbs: + - get + - list + - watch +- apiGroups: + - platform.splunk.com + resources: + - tenantconfigs/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 7873f18e1..d0f88b28b 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -100,3 +100,43 @@ rules: - get - patch - update +- apiGroups: + - platform.splunk.com + resources: + - platformconfigs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - platform.splunk.com + resources: + - platformconfigs/finalizers + verbs: + - update +- apiGroups: + - platform.splunk.com + resources: + - platformconfigs/status + verbs: + - get + - patch + - update +- apiGroups: + - platform.splunk.com + resources: + - tenantconfigs + verbs: + - get + - list + - watch +- apiGroups: + - platform.splunk.com + resources: + - tenantconfigs/status + verbs: + - get diff --git a/config/samples/platform_v4_platformconfig.yaml b/config/samples/platform_v4_platformconfig.yaml new file mode 100644 index 000000000..b243bfe1b --- /dev/null +++ b/config/samples/platform_v4_platformconfig.yaml @@ -0,0 +1,9 @@ +apiVersion: platform.splunk.com/v4 +kind: PlatformConfig +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: platformconfig-sample +spec: + # TODO(user): Add fields here diff --git 
a/config/samples/platform_v4_tenantconfig.yaml b/config/samples/platform_v4_tenantconfig.yaml new file mode 100644 index 000000000..a5d6d54ae --- /dev/null +++ b/config/samples/platform_v4_tenantconfig.yaml @@ -0,0 +1,9 @@ +apiVersion: platform.splunk.com/v4 +kind: TenantConfig +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: tenantconfig-sample +spec: + # TODO(user): Add fields here diff --git a/docs/README.md b/docs/README.md index ebb8a0f1f..ce2f30e9c 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,167 +1,203 @@ -# Splunk Operator for Kubernetes +# Getting Started with the Splunk Operator for Kubernetes -[![License](https://img.shields.io/:license-apache-blue.svg)](http://www.apache.org/licenses/LICENSE-2.0.html) -[![PkgGoDev](https://pkg.go.dev/badge/github.com/splunk/splunk-operator)](https://pkg.go.dev/github.com/splunk/splunk-operator) -[![Go Report Card](https://goreportcard.com/badge/github.com/splunk/splunk-operator)](https://goreportcard.com/report/github.com/splunk/splunk-operator) -[![Coverage Status](https://coveralls.io/repos/github/splunk/splunk-operator/badge.svg?branch=master)](https://coveralls.io/github/splunk/splunk-operator?branch=master) -[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fsplunk%2Fsplunk-operator.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fsplunk%2Fsplunk-operator?ref=badge_shield) -[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/splunk/splunk-operator) +The Splunk Operator for Kubernetes enables you to quickly and easily deploy Splunk Enterprise on your choice of private or public cloud provider. The Operator simplifies scaling and management of Splunk Enterprise by automating administrative workflows using Kubernetes best practices. 
-The Splunk Operator for Kubernetes (SOK) makes it easy for Splunk -Administrators to deploy and operate Enterprise deployments in a Kubernetes -infrastructure. Packaged as a container, it uses the -[operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) -to manage Splunk-specific [custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/), -following best practices to manage all the underlying Kubernetes objects for you. +The Splunk Operator runs as a container, and uses the Kubernetes [operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) and [custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) objects to create and manage a scalable and sustainable Splunk Enterprise environment. -This repository is used to build the Splunk -[Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) -for Kubernetes (SOK). If you are just looking for documentation on how to -deploy and use the latest release, please visit the published -[Splunk Operator documentation site](https://splunk.github.io/splunk-operator/) -or review the in-repo [Getting Started Documentation](GettingStarted.html). +This guide is intended to help new users get up and running with the +Splunk Operator for Kubernetes. 
It is divided into the following sections: -## Splunk General Terms Acceptance +* [Known Issues for the Splunk Operator](#known-issues-for-the-splunk-operator) +* [Prerequisites for the Splunk Operator](#prerequisites-for-the-splunk-operator) +* [Installing the Splunk Operator](#installing-the-splunk-operator) +* [Creating Splunk Enterprise Deployments](#creating-a-splunk-enterprise-deployment) +* [Securing Splunk Deployments in Kubernetes](Security.md) +* [Contacting Support](#contacting-support) -Starting with operator version 3.0.0, which includes support for Splunk Enterprise version 10.x, an additional Docker-Splunk specific parameter is required to start containers. **This is a breaking change, and user action is required.** +## Support Resources -Starting in 10.x image versions of Splunk Enterprise, license acceptance requires an additional `SPLUNK_GENERAL_TERMS=--accept-sgt-current-at-splunk-com` argument. This indicates that users have read and accepted the current/latest version of the Splunk General Terms, available at https://www.splunk.com/en_us/legal/splunk-general-terms.html as may be updated from time to time. Unless you have jointly executed with Splunk a negotiated version of these General Terms that explicitly supersedes this agreement, by accessing or using Splunk software, you are agreeing to the Splunk General Terms posted at the time of your access and use and acknowledging its applicability to the Splunk software. Please read and make sure you agree to the Splunk General Terms before you access or use this software. Only after doing so should you include the `--accept-sgt-current-at-splunk-com` flag to indicate your acceptance of the current/latest Splunk General Terms and launch this software. All examples below have been updated with this change. +SPLUNK SUPPORTED: The Splunk Operator for Kubernetes is a supported method for deploying distributed Splunk Enterprise environments using containers. 
The Splunk Operator is categorized as an Extension and subject to the support terms found [here](https://www.splunk.com/en_us/legal/splunk-software-support-policy.html). Splunk Enterprise deployed using the Splunk Operator is subject to the applicable support level offered [here](https://www.splunk.com/en_us/customer-success/support-programs.html). -If you use the below examples and the ‘--accept-sgt-current-at-splunk-com’ flag, you are indicating that you have read and accepted the current/latest version of the Splunk General Terms, as may be updated from time to time, and acknowledging its applicability to this software - as noted above. +COMMUNITY DEVELOPED: Splunk Operator for Kubernetes is an open source product developed by Splunkers with contributions from the community of partners and customers. The primary reason why Splunk is taking this approach is to push product development closer to those that use and depend upon it. This direct connection will help us all be more successful and move at a rapid pace. -By default, the SPLUNK_GENERAL_TERMS environment variable will be set to an empty string. You must either manually update it to have the required additional value `--accept-sgt-current-at-splunk-com` in the splunk-operator-controller-manager deployment, or you can pass the `SPLUNK_GENERAL_TERMS` parameter with the required additional value to the `make deploy` command. +If you're interested in contributing to the SOK open source project, review the [Contributing to the Project](CONTRIBUTING.md) page. 
-``` -make deploy IMG=docker.io/splunk/splunk-operator: WATCH_NAMESPACE="namespace1" RELATED_IMAGE_SPLUNK_ENTERPRISE="splunk/splunk:edge" SPLUNK_GENERAL_TERMS="--accept-sgt-current-at-splunk-com" -``` +**Community Support & Discussions on +[Slack](https://splunk-usergroups.slack.com)** channel #splunk-operator-for-kubernetes + +**File Issues or Enhancements in +[GitHub](https://github.com/splunk/splunk-operator/issues)** splunk/splunk-operator + + +## Known Issues for the Splunk Operator + +Review the [Change Log](ChangeLog.md) page for a history of changes in each release. + +## Prerequisites for the Splunk Operator + +Please check [release notes](https://github.com/splunk/splunk-operator/releases) for supportability matrix + +## Platform recommendations + + +The Splunk Operator should work with any [CNCF certified distribution](https://www.cncf.io/certification/software-conformance/) of Kubernetes. We do not have platform recommendations, but this is a table of platforms that our developers, customers, and partners have used successfully with the Splunk Operator. + + + + + + +
Splunk Development & Testing Platforms Amazon Elastic Kubernetes Service (EKS), Google Kubernetes Engine (GKE)
Customer Reported Platforms Microsoft Azure Kubernetes Service (AKS), Red Hat OpenShift
Partner Tested Platforms HPE Ezmeral
Other Platforms CNCF certified distribution
+ +### Splunk Enterprise Compatibility + +Each Splunk Operator release has specific Splunk Enterprise compatibility requirements. Splunk Operator can support more than one version of Splunk Enterprise release. Before installing or upgrading the Splunk Operator, review the [release notes](https://github.com/splunk/splunk-operator/releases) to verify version compatibility with Splunk Enterprise releases. + +Each release of splunk-operator is preset to the latest release mentioned in [release notes](https://github.com/splunk/splunk-operator/releases). If a user wants to change it to any release version specified in [release notes](https://github.com/splunk/splunk-operator/releases), they can simply change the environment variable `RELATED_IMAGE_SPLUNK_ENTERPRISE` in the splunk-operator deployment manifest file. + + +### Splunk Apps Installation + +Apps and add-ons can be installed using the Splunk Operator by following the instructions given at [Installing Splunk Apps](Examples.md#installing-splunk-apps). For the installation of premium apps please refer to [Premium Apps Installation Guide](PremiumApps.md). + +### Docker requirements +The Splunk Operator requires these docker images to be present or available to your Kubernetes cluster: + +* `splunk/splunk-operator`: The Splunk Operator image built by this repository or the [official release](https://hub.docker.com/r/splunk/splunk-operator) +* `splunk/splunk:`: The [Splunk Enterprise image](https://github.com/splunk/docker-splunk) + + +All of the Splunk Enterprise images are publicly available on [Docker Hub](https://hub.docker.com/). If your cluster does not have access to pull from Docker Hub, see the [Required Images Documentation](Images.md) page. + +Review the [Change Log](ChangeLog.md) page for a history of changes and Splunk Enterprise compatibility for each release. 
+ +### Hardware Resources Requirements +The resource guidelines for running production Splunk Enterprise instances in pods through the Splunk Operator are the same as running Splunk Enterprise natively on a supported operating system and file system. Refer to the Splunk Enterprise [Reference Hardware documentation](https://docs.splunk.com/Documentation/Splunk/latest/Capacity/Referencehardware) for additional details. We would also recommend following the same guidance on [Splunk Enterprise for disabling Transparent Huge Pages (THP)](https://docs.splunk.com/Documentation/Splunk/latest/ReleaseNotes/SplunkandTHP) for the nodes in your Kubernetes cluster. Please be aware that this may impact performance of other non-Splunk workloads. + +#### Minimum Reference Hardware +Based on Splunk Enterprise [Reference Hardware documentation](https://docs.splunk.com/Documentation/Splunk/latest/Capacity/Referencehardware), a summary of the minimum reference hardware requirements is given below. + +| Standalone | Search Head / Search Head Cluster | Indexer Cluster | +| ---------- | ------- | ------- | +| _Each Standalone Pod: 12 Physical CPU Cores or 24 vCPU at 2Ghz or greater per core, 12GB RAM._| _Each Search Head Pod: 16 Physical CPU Cores or 32 vCPU at 2Ghz or greater per core, 12GB RAM._| _Each Indexer Pod: 12 Physical CPU cores, or 24 vCPU at 2GHz or greater per core, 12GB RAM._ | -For more information about this change, see the [Splunk General Terms Migration Documentation](SplunkGeneralTermsMigration.html). -## Prerequisites +#### _Using Kubernetes Quality of Service Classes_ -You must have [Docker Engine](https://docs.docker.com/install/) installed to -build the Splunk Operator. +In addition to the guidelines provided in the reference hardware, [Kubernetes Quality of Service Classes](https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/) can be used to configure CPU/Mem resources allocations that map to your _service level objectives_. 
For further information on utilizing Kubernetes Quality of Service (QoS) classes, see the table below: -This project uses [Go modules](https://blog.golang.org/using-go-modules), -and requires [golang](https://golang.org/doc/install) 1.23.0 or later. -You must `export GO111MODULE=on` if cloning these repositories into your -`$GOPATH` (not recommended). -The [Kubernetes Operator SDK](https://github.com/operator-framework/operator-sdk) -must also be installed to build this project. +| QoS | Summary| Description | +| ---------- | ------- | ------- | +| _Guaranteed_ | _CPU/Mem ```requests``` = CPU/Mem ```limits```_ | _When the CPU and memory ```requests``` and ```limits``` values are equal, the pod is given a QoS class of Guaranteed. This level of service is recommended for Splunk Enterprise ___production environments___._ | +| _Burstable_ | _CPU/Mem ```requests``` < CPU/Mem ```limits```_ | _When the CPU and memory ```requests``` value is set lower than the ```limits``` the pod is given a QoS class of Burstable. This level of service is useful in a user acceptance testing ___(UAT) environment___, where the pods run with minimum resources, and Kubernetes allocates additional resources depending on usage._| +| _BestEffort_ | _No CPU/Mem ```requests``` or ```limits``` are set_ | _When the ```requests``` or ```limits``` values are not set, the pod is given a QoS class of BestEffort. This level of service is sufficient for ___testing, or a small development task___._ | +Examples on how to implement these QoS are given at [Examples of Guaranteed and Burstable QoS](CustomResources.md#examples-of-guaranteed-and-burstable-qos) section. + + +### Storage guidelines +The Splunk Operator uses Kubernetes [Persistent Volume Claims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) to store all of your Splunk Enterprise configuration ("$SPLUNK_HOME/etc" path) and event ("$SPLUNK_HOME/var" path) data. 
If one of the underlying machines fail, Kubernetes will automatically try to recover by restarting the Splunk Enterprise pods on another machine that is able to reuse the same data volumes. This minimizes the maintenance burden on your operations team by reducing the impact of common hardware failures to the equivalent of a service restart. +The use of Persistent Volume Claims requires that your cluster is configured to support one or more Kubernetes persistent [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/). See the [Setting Up a Persistent Storage for Splunk](StorageClass.md) page for more +information. + +### What Storage Type To Use? + +The Kubernetes infrastructure must have access to storage that meets or exceeds the recommendations provided in the Splunk Enterprise storage type recommendations at [Reference Hardware documentation - what storage type to use for a given role?](https://docs.splunk.com/Documentation/Splunk/latest/Capacity/Referencehardware#What_storage_type_should_I_use_for_a_role.3F) In summary, Indexers with SmartStore need NVMe or SSD storage to provide the necessary IOPs for a successful Splunk Enterprise environment. + + +### Splunk SmartStore Required +For production environments, we are requiring the use of Splunk SmartStore. As a Splunk Enterprise deployment's data volume increases, demand for storage typically outpaces demand for compute resources. [Splunk's SmartStore Feature](https://docs.splunk.com/Documentation/Splunk/latest/Indexer/AboutSmartStore) allows you to manage your indexer storage and compute resources in a ___cost-effective___ manner by scaling those resources separately. SmartStore utilizes a fast storage cache on each indexer node to keep recent data locally available for search and keep other data in a remote object store. Look into the [SmartStore Resource Guide](SmartStore.md) document for configuring and using SmartStore through operator. 
+ +## Installing the Splunk Operator + +A Kubernetes cluster administrator can install and start the Splunk Operator for specific namespace by running: ``` -git clone -b v1.39.0 https://github.com/operator-framework/operator-sdk -cd operator-sdk -make install +kubectl apply -f https://github.com/splunk/splunk-operator/releases/download/3.0.0/splunk-operator-namespace.yaml --server-side ``` -You may need to add `$GOPATH/bin` to your path to run the `operator-sdk` -command line tool: - +A Kubernetes cluster administrator can install and start the Splunk Operator for cluster-wide by running: ``` -export PATH=${PATH}:${GOPATH}/bin +kubectl apply -f https://github.com/splunk/splunk-operator/releases/download/3.0.0/splunk-operator-cluster.yaml --server-side ``` -It is also recommended that you install the following golang tools, -which are used by various `make` targets: +The reason for appending `--server-side` to the apply command is that some of the CRDs are getting too long according to the CRD standards. There are no real implications caused by this. -```shell -go install golang.org/x/lint/golint -go install golang.org/x/tools/cmd/cover -go install github.com/mattn/goveralls -go get -u github.com/mikefarah/yq/v3 -go get -u github.com/go-delve/delve/cmd/dlv -``` +The [Advanced Installation Instructions](Install.md) page offers guidance for advanced configurations, including the use of private image registries, installation at cluster scope, and installing the Splunk Operator as a user who is not a Kubernetes administrator. Users of Red Hat OpenShift should review the [Red Hat OpenShift](OpenShift.md) page. -## Cloning this repository +*Note: We recommended that the Splunk Enterprise Docker image is copied to a private registry, or directly onto your Kubernetes workers before creating large Splunk Enterprise deployments. 
See the [Required Images Documentation](Images.md) page, and the [Advanced Installation Instructions](Install.md) page for guidance on working with copies of the Docker images.* -```shell -git clone git@github.com:splunk/splunk-operator.git -cd splunk-operator +After the Splunk Operator starts, you'll see a single pod running within your current namespace: ``` +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +splunk-operator-75f5d4d85b-8pshn 1/1 Running 0 5s +``` + +### Installation using Helm charts + +Installing the Splunk Operator using Helm allows you to quickly deploy the operator and Splunk Enterprise in a Kubernetes cluster. The operator is easily configurable allowing for advanced installations including support for Splunk Validated Architectures. Helm also provides a number of features to manage the operator lifecycle. The [Installation using Helm](Helm.md) page will walk you through installing and configuring Splunk Enterprise deployments using Helm charts. -## Repository overview - -This repository consists of the following code used to build the splunk-operator binary: - -* `main.go`: Provides the main() function, where everything begins -* `apis/`: Source code for the operator's custom resource definition types -* `controllers/`: Used to register controllers that watch for changes to custom resources -* `pkg/splunk/enterprise/`: Source code for controllers that manage Splunk Enterprise resources -* `pkg/splunk/controller/`: Common code shared across Splunk controllers -* `pkg/splunk/common/`: Common code used by most other splunk packages -* `pkg/splunk/client/`: Simple client for Splunk Enterprise REST API -* `pkg/splunk/test/`: Common code used by other packages for unit testing - -`main()` uses `controllers` to register all the `enterprise` controllers -that manage custom resources by watching for Kubernetes events. -The `enterprise` controllers are implemented using common code provided -by the `controllers` package. 
The `enterprise` controllers also use the REST API client -provided in the `pkg/splunk/client` package. The types provided by `apis/` and -common code in the `pkg/splunk/common/` package are used universally. Note that the -source code for `main()` is generated from a template provided by the Operator SDK. - -In addition to the source code, this repository includes: - -* `tools`: Build scripts, templates, etc. used to build the container image -* `config`: Kubernetes YAML templates used to install the Splunk Operator -* `docs`: Getting Started Guide and other documentation in Markdown format -* `test`: Integration test framework built using Ginkgo. See [docs](https://github.com/splunk/splunk-operator/blob/main/test/README.md) for more info. - -## Building the operator - -You can build the operator by just running `make`. - -Other make targets include (more info below): - -* `make all`: builds `manager` executable -* `make test`: Runs unit tests with Coveralls code coverage output to coverage.out -* `make scorecard`: Runs operator-sdk scorecard tests using OLM installation bundle -* `make generate`: runs operator-generate k8s, crds and csv commands, updating installation YAML files and OLM bundle -* `make docker-build`: generates `splunk-operator` container image example `make docker-build IMG=docker.io/splunk/splunk-operator:` -* `make docker-buildx`: generates `splunk-operator` container image for multiple platforms, example `make docker-buildx IMG=docker.io/splunk/splunk-operator:` -* `make docker-push`: push docker image to given repository example `make docker-push IMG=docker.io/splunk/splunk-operator:` -* `make clean`: removes the binary build output and `splunk-operator` container image example `make docker-push IMG=docker.io/splunk/splunk-operator:` -* `make run`: runs the Splunk Operator locally, monitoring the Kubernetes cluster configured in your current `kubectl` context -* `make fmt`: runs `go fmt` on all `*.go` source files in this project -* `make 
bundle-build`: generates `splunk-operator-bundle` bundle container image for OLM example `make bundle-build IMAGE_TAG_BASE=docker.io/splunk/splunk-operator VERSION= IMG=docker.io/splunk/splunk-operator:` -* `make bundle-push`: push OLM bundle docker image to given repository example `make bundle-push IMAGE_TAG_BASE=docker.io/splunk/splunk-operator VERSION= IMG=docker.io/splunk/splunk-operator:` -* `make catalog-build`: generates `splunk-operator-catalog` catalog container image example `make catalog-build IMAGE_TAG_BASE=docker.io/splunk/splunk-operator VERSION= IMG=docker.io/splunk/splunk-operator:` -* `make catalog-push`: push catalog docker image to given repository example`make catalog-push IMAGE_TAG_BASE=docker.io/splunk/splunk-operator VERSION= IMG=docker.io/splunk/splunk-operator:` - -## Deploying the Splunk Operator -`make deploy` command will deploy all the necessary resources to run Splunk Operator like RBAC policies, services, configmaps, deployment. Operator will be installed in `splunk-operator` namespace. If `splunk-operator` namespace does not exist, it will create the namespace. By default `make deploy` will install operator clusterwide. Operator will watch all the namespaces for any splunk enterprise custom resources. - -```shell -make deploy IMG=docker.io/splunk/splunk-operator: +Splunk Operator CRDs are not deployed as part of the helm installation. Users need to deploy the latest CRDs manually. See the [Installation using Helm](Helm.md) documentation on how to deploy the CRDs before installing the helm charts. + +## Upgrading the Splunk Operator + +For information on upgrading the Splunk Operator, see the [How to upgrade Splunk Operator and Splunk Enterprise Deployments](SplunkOperatorUpgrade.md) page. + +## Creating a Splunk Enterprise deployment + +The `Standalone` custom resource is used to create a single instance deployment of Splunk Enterprise. For example: + +1. 
Run the command to create a deployment named “s1”: + + +```yaml +cat < WATCH_NAMESPACE="namespace1" +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +splunk-operator-7c5599546c-wt4xl 1/1 Running 0 11h +splunk-s1-standalone-0 1/1 Running 0 45s ``` -If you want operator to use specific version of splunk instance, then you must pass `RELATED_IMAGE_SPLUNK_ENTERPRISE` parameter to `make deploy` command +*Note: if your shell prints a `%` at the end, leave that out when you copy the output.* + +2. You can use a simple network port forward to open port 8000 for Splunk Web access: ``` -make deploy IMG=docker.io/splunk/splunk-operator: WATCH_NAMESPACE="namespace1" RELATED_IMAGE_SPLUNK_ENTERPRISE="splunk/splunk:edge" +kubectl port-forward splunk-s1-standalone-0 8000 ``` -Use this to run the operator as a local foreground process on your machine: +3. Get your passwords for the namespace. The Splunk Enterprise passwords used in the namespace are generated automatically. To learn how to find and read the passwords, see the [Reading global kubernetes secret object](Examples.md#reading-global-kubernetes-secret-object) page. + -```shell -make run +4. Log into Splunk Enterprise at http://localhost:8000 using the `admin` account with the password. + +5. To delete your standalone deployment, run: + +``` +kubectl delete standalone s1 ``` -This will use your current Kubernetes context from `~/.kube/config` to manage -resources in your current namespace. +The `Standalone` custom resource is just one of the resources the Splunk Operator provides. You can find more custom resources and the parameters they support on the [Custom Resource Guide](CustomResources.md) page. -Please see the [Getting Started Documentation](GettingStarted.html) for more -information, including instructions on how to install the operator in your -cluster. +For additional deployment examples, including Splunk Enterprise clusters, see the +[Configuring Splunk Enterprise Deployments](Examples.md) page. 
+For additional guidance on making Splunk Enterprise ports accessible outside of Kubernetes, see the [Configuring Ingress](Ingress.md) page. -## License -[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fsplunk%2Fsplunk-operator.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fsplunk%2Fsplunk-operator?ref=badge_large) \ No newline at end of file +## Contacting Support +If you are a Splunk Enterprise customer with a valid support entitlement contract and have a Splunk-related question, you can open a support case on the https://www.splunk.com/ support portal. diff --git a/internal/controller/clustermanager_controller_test.go b/internal/controller/clustermanager_controller_test.go index 771d2f4f6..29152d7b1 100644 --- a/internal/controller/clustermanager_controller_test.go +++ b/internal/controller/clustermanager_controller_test.go @@ -3,7 +3,6 @@ package controller import ( "context" "fmt" - "github.com/splunk/splunk-operator/internal/controller/testutils" enterpriseApi "github.com/splunk/splunk-operator/api/v4" diff --git a/internal/controller/clustermaster_controller_test.go b/internal/controller/clustermaster_controller_test.go index 5c5de2584..02d86c736 100644 --- a/internal/controller/clustermaster_controller_test.go +++ b/internal/controller/clustermaster_controller_test.go @@ -3,7 +3,6 @@ package controller import ( "context" "fmt" - "github.com/splunk/splunk-operator/internal/controller/testutils" enterpriseApi "github.com/splunk/splunk-operator/api/v4" diff --git a/internal/controller/indexercluster_controller_test.go b/internal/controller/indexercluster_controller_test.go index f9473f0f8..fe221bbd5 100644 --- a/internal/controller/indexercluster_controller_test.go +++ b/internal/controller/indexercluster_controller_test.go @@ -3,7 +3,6 @@ package controller import ( "context" "fmt" - "github.com/splunk/splunk-operator/internal/controller/testutils" enterpriseApi "github.com/splunk/splunk-operator/api/v4" diff --git 
a/internal/controller/licensemanager_controller_test.go b/internal/controller/licensemanager_controller_test.go index 4d95d6b5f..0ec97a639 100644 --- a/internal/controller/licensemanager_controller_test.go +++ b/internal/controller/licensemanager_controller_test.go @@ -3,7 +3,6 @@ package controller import ( "context" "fmt" - "github.com/splunk/splunk-operator/internal/controller/testutils" enterpriseApi "github.com/splunk/splunk-operator/api/v4" diff --git a/internal/controller/licensemaster_controller_test.go b/internal/controller/licensemaster_controller_test.go index fdd967aa3..fc2dc7b7c 100644 --- a/internal/controller/licensemaster_controller_test.go +++ b/internal/controller/licensemaster_controller_test.go @@ -3,7 +3,6 @@ package controller import ( "context" "fmt" - "github.com/splunk/splunk-operator/internal/controller/testutils" enterpriseApi "github.com/splunk/splunk-operator/api/v4" diff --git a/internal/controller/monitoringconsole_controller_test.go b/internal/controller/monitoringconsole_controller_test.go index 644f13da0..bc5949d53 100644 --- a/internal/controller/monitoringconsole_controller_test.go +++ b/internal/controller/monitoringconsole_controller_test.go @@ -3,7 +3,6 @@ package controller import ( "context" "fmt" - "github.com/splunk/splunk-operator/internal/controller/testutils" enterpriseApi "github.com/splunk/splunk-operator/api/v4" diff --git a/internal/controller/platform/platformconfig_controller.go b/internal/controller/platform/platformconfig_controller.go new file mode 100644 index 000000000..26e67500b --- /dev/null +++ b/internal/controller/platform/platformconfig_controller.go @@ -0,0 +1,63 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package platform + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + platformv4 "github.com/splunk/splunk-operator/api/platform/v4" +) + +// PlatformConfigReconciler reconciles a PlatformConfig object +type PlatformConfigReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=platform.splunk.com,resources=platformconfigs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=platform.splunk.com,resources=platformconfigs/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=platform.splunk.com,resources=platformconfigs/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the PlatformConfig object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/reconcile +func (r *PlatformConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + _ = logf.FromContext(ctx) + + // TODO(user): your logic here + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. 
+func (r *PlatformConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&platformv4.PlatformConfig{}). + Named("platform-platformconfig"). + Complete(r) +} diff --git a/internal/controller/platform/platformconfig_controller_test.go b/internal/controller/platform/platformconfig_controller_test.go new file mode 100644 index 000000000..c65b8aa79 --- /dev/null +++ b/internal/controller/platform/platformconfig_controller_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package platform + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + platformv4 "github.com/splunk/splunk-operator/api/platform/v4" +) + +var _ = Describe("PlatformConfig Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + platformconfig := &platformv4.PlatformConfig{} + + BeforeEach(func() { + By("creating the custom resource for the Kind PlatformConfig") + err := k8sClient.Get(ctx, typeNamespacedName, platformconfig) + if err != nil && errors.IsNotFound(err) { + resource := &platformv4.PlatformConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. + resource := &platformv4.PlatformConfig{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance PlatformConfig") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &PlatformConfigReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. 
+ }) + }) +}) diff --git a/internal/controller/platform/suite_test.go b/internal/controller/platform/suite_test.go new file mode 100644 index 000000000..9d455d4e8 --- /dev/null +++ b/internal/controller/platform/suite_test.go @@ -0,0 +1,116 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package platform + +import ( + "context" + "os" + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + platformv4 "github.com/splunk/splunk-operator/api/platform/v4" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +var ( + ctx context.Context + cancel context.CancelFunc + testEnv *envtest.Environment + cfg *rest.Config + k8sClient client.Client +) + +func TestControllers(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + var err error + err = platformv4.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + + // Retrieve the first found binary directory to allow running tests from IDEs + if getFirstFoundEnvTestBinaryDir() != "" { + testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir() + } + + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + cancel() + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) + +// getFirstFoundEnvTestBinaryDir locates the first binary in the specified path. +// ENVTEST-based tests depend on specific binaries, usually located in paths set by +// controller-runtime. When running tests directly (e.g., via an IDE) without using +// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured. +// +// This function streamlines the process by finding the required binaries, similar to +// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are +// properly set up, run 'make setup-envtest' beforehand. 
+func getFirstFoundEnvTestBinaryDir() string { + basePath := filepath.Join("..", "..", "..", "bin", "k8s") + entries, err := os.ReadDir(basePath) + if err != nil { + logf.Log.Error(err, "Failed to read directory", "path", basePath) + return "" + } + for _, entry := range entries { + if entry.IsDir() { + return filepath.Join(basePath, entry.Name()) + } + } + return "" +} diff --git a/internal/controller/searchheadcluster_controller_test.go b/internal/controller/searchheadcluster_controller_test.go index 983849237..2e764909c 100644 --- a/internal/controller/searchheadcluster_controller_test.go +++ b/internal/controller/searchheadcluster_controller_test.go @@ -3,7 +3,6 @@ package controller import ( "context" "fmt" - "github.com/splunk/splunk-operator/internal/controller/testutils" enterpriseApi "github.com/splunk/splunk-operator/api/v4" diff --git a/internal/controller/standalone_controller.go b/internal/controller/standalone_controller.go index bb7106f05..0a0b9623a 100644 --- a/internal/controller/standalone_controller.go +++ b/internal/controller/standalone_controller.go @@ -40,6 +40,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/log" + + sdk "github.com/splunk/splunk-operator/pkg/platform-sdk" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" ) const ( @@ -49,8 +52,9 @@ const ( // StandaloneReconciler reconciles a Standalone object type StandaloneReconciler struct { client.Client - Scheme *runtime.Scheme - Recorder record.EventRecorder + Scheme *runtime.Scheme + Recorder record.EventRecorder + SDKRuntime api.Runtime } //+kubebuilder:rbac:groups=enterprise.splunk.com,resources=standalones,verbs=get;list;watch;create;update;patch;delete @@ -68,6 +72,8 @@ type StandaloneReconciler struct { //+kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete 
//+kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=platform.splunk.com,resources=tenantconfigs,verbs=get;list;watch +//+kubebuilder:rbac:groups=platform.splunk.com,resources=tenantconfigs/status,verbs=get // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. @@ -113,7 +119,7 @@ func (r *StandaloneReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Pass event recorder through context ctx = context.WithValue(ctx, splcommon.EventRecorderKey, r.Recorder) - result, err := ApplyStandalone(ctx, r.Client, instance) + result, err := ApplyStandalone(ctx, r.Client, r.SDKRuntime, instance) if result.Requeue && result.RequeueAfter != 0 { reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) } @@ -122,12 +128,40 @@ func (r *StandaloneReconciler) Reconcile(ctx context.Context, req ctrl.Request) } // ApplyStandalone adding to handle unit test case -var ApplyStandalone = func(ctx context.Context, client client.Client, instance *enterpriseApi.Standalone) (reconcile.Result, error) { - return enterprise.ApplyStandalone(ctx, client, instance) +var ApplyStandalone = func(ctx context.Context, client client.Client, sdkRuntime api.Runtime, instance *enterpriseApi.Standalone) (reconcile.Result, error) { + return enterprise.ApplyStandalone(ctx, client, sdkRuntime, instance) } // SetupWithManager sets up the controller with the Manager. 
func (r *StandaloneReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	// Initialize Platform SDK Runtime (optional - falls back to legacy mode if unavailable)
+	recorder := mgr.GetEventRecorderFor("splunk-standalone-controller")
+	logger := ctrl.Log.WithName("standalone-controller")
+
+	sdkRuntime, err := sdk.NewRuntime(
+		mgr.GetClient(),
+		sdk.WithClusterScoped(),
+		sdk.WithLogger(ctrl.Log.WithName("platform-sdk")),
+		sdk.WithEventRecorder(recorder),
+	)
+	if err != nil {
+		logger.Info("Platform SDK runtime initialization failed, falling back to legacy mode", "error", err.Error())
+		r.SDKRuntime = nil
+	} else {
+		// Start SDK runtime with timeout to prevent hanging
+		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+		defer cancel() // NOTE(review): cancels ctx as soon as SetupWithManager returns; if Start retains ctx for background watchers they will be stopped — confirm Start is synchronous, or tie cancellation to manager shutdown (e.g. mgr.Add a Runnable)
+
+		startErr := sdkRuntime.Start(ctx)
+		if startErr != nil {
+			logger.Info("Platform SDK runtime start failed, falling back to legacy mode", "error", startErr.Error())
+			r.SDKRuntime = nil
+		} else {
+			logger.Info("Platform SDK runtime initialized successfully")
+			r.SDKRuntime = sdkRuntime
+		}
+	}
+
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&enterpriseApi.Standalone{}).
WithEventFilter(predicate.Or( diff --git a/internal/controller/standalone_controller_test.go b/internal/controller/standalone_controller_test.go index d7c4ca842..498423602 100644 --- a/internal/controller/standalone_controller_test.go +++ b/internal/controller/standalone_controller_test.go @@ -3,10 +3,10 @@ package controller import ( "context" "fmt" - "github.com/splunk/splunk-operator/internal/controller/testutils" enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" "time" @@ -39,7 +39,7 @@ var _ = Describe("Standalone Controller", func() { It("Get Standalone custom resource should failed", func() { namespace := "ns-splunk-st-1" - ApplyStandalone = func(ctx context.Context, client client.Client, instance *enterpriseApi.Standalone) (reconcile.Result, error) { + ApplyStandalone = func(ctx context.Context, client client.Client, sdkRuntime api.Runtime, instance *enterpriseApi.Standalone) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -52,7 +52,7 @@ var _ = Describe("Standalone Controller", func() { It("Create Standalone custom resource with annotations should pause", func() { namespace := "ns-splunk-st-2" - ApplyStandalone = func(ctx context.Context, client client.Client, instance *enterpriseApi.Standalone) (reconcile.Result, error) { + ApplyStandalone = func(ctx context.Context, client client.Client, sdkRuntime api.Runtime, instance *enterpriseApi.Standalone) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -71,7 +71,7 @@ var _ = Describe("Standalone Controller", func() { It("Create Standalone custom resource should succeeded", func() { namespace := "ns-splunk-st-3" - ApplyStandalone = func(ctx context.Context, client client.Client, instance *enterpriseApi.Standalone) (reconcile.Result, error) { + ApplyStandalone = 
func(ctx context.Context, client client.Client, sdkRuntime api.Runtime, instance *enterpriseApi.Standalone) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -84,7 +84,7 @@ var _ = Describe("Standalone Controller", func() { It("Cover Unused methods", func() { namespace := "ns-splunk-st-4" - ApplyStandalone = func(ctx context.Context, client client.Client, instance *enterpriseApi.Standalone) (reconcile.Result, error) { + ApplyStandalone = func(ctx context.Context, client client.Client, sdkRuntime api.Runtime, instance *enterpriseApi.Standalone) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} diff --git a/pkg/platform-sdk/README.md b/pkg/platform-sdk/README.md new file mode 100644 index 000000000..b538e56cd --- /dev/null +++ b/pkg/platform-sdk/README.md @@ -0,0 +1,515 @@ +# Platform SDK + +The Platform SDK provides a unified, high-level API for building Splunk Operator controllers. It abstracts common patterns like certificate management, secret rotation, service discovery, and resource construction. 
+
+## Table of Contents
+
+- [Quick Start](#quick-start)
+- [Architecture](#architecture)
+- [Core Concepts](#core-concepts)
+- [Services](#services)
+- [Builders](#builders)
+- [Configuration](#configuration)
+- [Logging and Events](#logging-and-events)
+- [Examples](#examples)
+
+## Quick Start
+
+### Setup in Controller
+
+```go
+import (
+    sdk "github.com/splunk/splunk-operator/pkg/platform-sdk"
+    "github.com/splunk/splunk-operator/pkg/platform-sdk/api"
+)
+
+type MyReconciler struct {
+    client.Client
+    sdkRuntime api.Runtime
+}
+
+func (r *MyReconciler) SetupWithManager(mgr ctrl.Manager) error {
+    // Create SDK runtime
+    recorder := mgr.GetEventRecorderFor("splunk-operator")
+    sdkRuntime, err := sdk.NewRuntime(
+        mgr.GetClient(),
+        sdk.WithClusterScoped(),
+        sdk.WithLogger(log),
+        sdk.WithEventRecorder(recorder),
+    )
+    if err != nil {
+        return err
+    }
+
+    // Start the SDK
+    if err := sdkRuntime.Start(context.Background()); err != nil {
+        return err
+    }
+
+    r.sdkRuntime = sdkRuntime
+
+    return ctrl.NewControllerManagedBy(mgr).
+        For(&enterprisev4.Standalone{}).
+ Complete(r) +} +``` + +### Usage in Reconcile Loop + +```go +func (r *MyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + // Create reconcile context + rctx := r.sdkRuntime.NewReconcileContext(ctx, req.Namespace, req.Name) + + // Get the CR + cr := &enterprisev4.Standalone{} + if err := r.Get(ctx, req.NamespacedName, cr); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Resolve certificate + cert, err := rctx.ResolveCertificate(certificate.Binding{ + Name: "splunk-tls", + DNSNames: []string{ + fmt.Sprintf("%s.%s.svc", cr.Name, cr.Namespace), + fmt.Sprintf("%s.%s.svc.cluster.local", cr.Name, cr.Namespace), + }, + }) + if err != nil { + return ctrl.Result{}, err + } + if !cert.Ready { + rctx.Logger().Info("Certificate not ready, requeueing") + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + + // Resolve secret + secretRef, err := rctx.ResolveSecret(secret.Binding{ + Name: "splunk-credentials", + Type: secret.TypeSplunk, + Keys: []string{"password", "hec_token"}, + }) + if err != nil { + return ctrl.Result{}, err + } + + // Build StatefulSet + sts, err := rctx.BuildStatefulSet(). + WithName(cr.Name). + WithReplicas(cr.Spec.Replicas). + WithImage(cr.Spec.Image). + WithPorts([]corev1.ContainerPort{ + {Name: "web", ContainerPort: 8000}, + {Name: "mgmt", ContainerPort: 8089}, + }). + WithCertificate(cert). + WithSecret(secretRef). + WithObservability(). 
+ Build() + if err != nil { + return ctrl.Result{}, err + } + + // Apply the StatefulSet + if err := r.Client.Patch(ctx, sts, client.Apply, + client.ForceOwnership, client.FieldOwner("splunk-operator")); err != nil { + return ctrl.Result{}, err + } + + // Record event + rctx.EventRecorder().Event(cr, corev1.EventTypeNormal, + api.EventReasonCertificateReady, + fmt.Sprintf("Certificate %s is ready", cert.SecretName)) + + return ctrl.Result{}, nil +} +``` + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Controller Layer │ +│ (Standalone, ClusterManager, SearchHead controllers) │ +└───────────────────────┬─────────────────────────────────────┘ + │ + │ Uses + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ Platform SDK │ +├─────────────────────────────────────────────────────────────┤ +│ Runtime (Singleton per operator) │ +│ │ │ +│ ├─► ReconcileContext (Per reconciliation) │ +│ │ │ +│ └─► Service Registry │ +│ ├─► ConfigResolver │ +│ ├─► CertificateResolver ─► [cert-manager|self-sign]│ +│ ├─► SecretResolver ─► [ESO|Kubernetes] │ +│ ├─► DiscoveryService │ +│ ├─► ObservabilityService │ +│ └─► Builders (StatefulSet, Service, etc.) │ +└─────────────────────────────────────────────────────────────┘ + │ + │ Reads + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ Configuration Layer │ +│ PlatformConfig (cluster) + TenantConfig (namespace) │ +└─────────────────────────────────────────────────────────────┘ +``` + +## Core Concepts + +### Runtime + +The `Runtime` is the main SDK entry point. Create **one instance per operator** and reuse it across all reconciliations. + +- Manages lifecycle of all services +- Starts background watchers for configuration +- Detects cluster capabilities (cert-manager, ESO, OTel) + +### ReconcileContext + +A `ReconcileContext` is a lightweight, short-lived object created **once per reconciliation**. 
It: + +- Knows about the specific resource being reconciled +- Provides a logger with namespace/name context +- Provides access to all SDK services and builders +- Includes EventRecorder for emitting Kubernetes events + +### Service Registry + +The registry creates and caches services with lazy initialization: + +- **ConfigResolver**: Hierarchical configuration resolution +- **CertificateResolver**: Certificate provisioning +- **SecretResolver**: Secret validation and versioning +- **DiscoveryService**: Service discovery +- **ObservabilityService**: Monitoring annotations + +## Services + +### ConfigResolver + +Resolves configuration with 4-layer hierarchy: + +1. Built-in defaults (hardcoded) +2. PlatformConfig (cluster-scoped) +3. TenantConfig (namespace-scoped) +4. CR spec (per-resource) + +```go +// Get resolved certificate config +certConfig, err := configResolver.ResolveCertificateConfig(ctx, namespace) + +// Check provider +if certConfig.Provider == "cert-manager" { + // Use cert-manager +} +``` + +### CertificateResolver + +Provisions TLS certificates with automatic provider selection: + +```go +cert, err := rctx.ResolveCertificate(certificate.Binding{ + Name: "my-tls", + DNSNames: []string{"my-service.default.svc"}, + Duration: 90 * 24 * time.Hour, +}) + +if !cert.Ready { + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil +} + +// Use cert.SecretName in pod spec +``` + +**Providers:** +- **cert-manager**: Creates Certificate CR, watches for ready status +- **self-signed**: Generates RSA 2048-bit key and X.509 cert, stores in Secret + +### SecretResolver + +Validates secrets and manages versioning for Splunk secrets: + +```go +secretRef, err := rctx.ResolveSecret(secret.Binding{ + Name: "splunk-credentials", + Type: secret.TypeSplunk, + Keys: []string{"password", "hec_token"}, +}) + +// secretRef.SecretName might be "splunk-credentials-v3" (versioned) +``` + +**Versioning:** +- Source: `splunk-{namespace}-secret` (admin creates) +- Versioned: 
`{base}-v1`, `{base}-v2`, `{base}-v3` (SDK manages) +- Automatic rotation on content change +- Keeps last 3 versions for rollback + +### DiscoveryService + +Discovers Splunk instances and Kubernetes services: + +```go +// Find all IndexerClusters +endpoints, err := rctx.DiscoverSplunk(discovery.SplunkSelector{ + Type: discovery.SplunkTypeIndexerCluster, + Namespace: "splunk-prod", +}) + +for _, ep := range endpoints { + log.Info("Found indexer", "url", ep.URL, "ready", ep.Health.Healthy) +} + +// Generic service discovery +services, err := rctx.Discover(discovery.Selector{ + Labels: map[string]string{"app": "splunk"}, +}) +``` + +### ObservabilityService + +Adds monitoring and telemetry annotations: + +```go +// Check if observability is enabled +enabled, err := observability.ShouldAddObservability(ctx, namespace) + +// Get annotations for pod spec +annotations, err := observability.GetObservabilityAnnotations(ctx, namespace) +// Returns: {"prometheus.io/scrape": "true", "prometheus.io/port": "9090", ...} +``` + +## Builders + +All builders use a fluent API pattern: + +### StatefulSetBuilder + +```go +sts, err := rctx.BuildStatefulSet(). + WithName("splunk-indexer"). + WithReplicas(3). + WithImage("splunk/splunk:9.1.0"). + WithPorts([]corev1.ContainerPort{ + {Name: "web", ContainerPort: 8000}, + {Name: "mgmt", ContainerPort: 8089}, + {Name: "s2s", ContainerPort: 9997}, + }). + WithCertificate(certRef). // Auto-creates volume & mount + WithSecret(secretRef). // Auto-creates volume + WithConfigMap("app-config"). // Auto-creates volume + WithObservability(). // Adds Prometheus annotations + WithEnv(corev1.EnvVar{ + Name: "SPLUNK_ROLE", + Value: "splunk_indexer", + }). + WithResources(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("8Gi"), + }, + }). + Build() +``` + +### ServiceBuilder + +```go +svc, err := rctx.BuildService(). + WithName("splunk-indexer"). 
+ WithType(corev1.ServiceTypeClusterIP). + WithPorts([]corev1.ServicePort{ + {Name: "web", Port: 8000, TargetPort: intstr.FromInt(8000)}, + {Name: "mgmt", Port: 8089, TargetPort: intstr.FromInt(8089)}, + }). + WithDiscoveryLabels(). // Adds splunk.com/discoverable=true + Build() +``` + +### ConfigMapBuilder + +```go +cm, err := rctx.BuildConfigMap(). + WithName("app-config"). + WithData(map[string]string{ + "server.conf": serverConfContent, + "inputs.conf": inputsConfContent, + }). + Build() +``` + +### DeploymentBuilder + +```go +deploy, err := rctx.BuildDeployment(). + WithName("splunk-forwarder"). + WithReplicas(2). + WithImage("splunk/universalforwarder:9.1.0"). + WithPorts([]corev1.ContainerPort{ + {Name: "mgmt", ContainerPort: 8089}, + }). + WithCertificate(certRef). + WithObservability(). + Build() +``` + +## Configuration + +### PlatformConfig (Cluster-scoped) + +```yaml +apiVersion: platform.splunk.com/v1alpha1 +kind: PlatformConfig +metadata: + name: platform-default +spec: + certificates: + provider: cert-manager + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + duration: 7776000 # 90 days + renewBefore: 2592000 # 30 days + + secrets: + provider: kubernetes + versioningEnabled: true + versionsToKeep: 3 + + observability: + enabled: true + provider: prometheus + prometheusPort: 9090 + prometheusPath: /metrics +``` + +### TenantConfig (Namespace-scoped) + +```yaml +apiVersion: platform.splunk.com/v1alpha1 +kind: TenantConfig +metadata: + name: tenant-config + namespace: splunk-prod +spec: + certificates: + provider: self-signed # Override to self-signed for this namespace + + observability: + enabled: false # Disable observability for this tenant +``` + +## Logging and Events + +### Logging + +The SDK uses structured logging with log levels: + +```go +// V(0) - Info: Important state changes (always visible) +rctx.Logger().Info("Certificate provisioned", "name", cert.SecretName) + +// V(1) - Debug: Detailed operations 
+rctx.Logger().V(1).Info("Using cached config", "namespace", namespace) + +// V(2) - Trace: Very detailed, all API calls +rctx.Logger().V(2).Info("Calling cert-manager API", "url", endpoint) + +// Errors (always visible) +rctx.Logger().Error(err, "Failed to provision certificate", "name", certName) +``` + +Enable debug logs: `--zap-log-level=debug` or `--v=1` +Enable trace logs: `--zap-log-level=trace` or `--v=2` + +### Events + +Emit Kubernetes events for important state changes: + +```go +// Normal events +rctx.EventRecorder().Event(cr, corev1.EventTypeNormal, + api.EventReasonCertificateReady, + "Certificate has been provisioned successfully") + +// Warning events +rctx.EventRecorder().Event(cr, corev1.EventTypeWarning, + api.EventReasonSecretMissing, + "Secret splunk-credentials not found") +``` + +View events: +```bash +kubectl describe standalone my-splunk +# Events: +# Normal CertificateReady Certificate my-tls is ready +# Normal SecretVersionCreated Created secret version 3 +``` + +## Examples + +See the [examples](examples/) directory for complete working examples: + +- [Basic Standalone](examples/basic-standalone/): Simple Splunk standalone with certificates +- [Clustered Setup](examples/clustered/): Indexer cluster with service discovery +- [Custom Configuration](examples/custom-config/): Using PlatformConfig and TenantConfig +- [Observability Integration](examples/observability/): Prometheus metrics and OTel tracing + +## Best Practices + +1. **One Runtime per operator**: Create the SDK runtime once in SetupWithManager +2. **One ReconcileContext per reconciliation**: Create fresh context in each Reconcile call +3. **Check Ready status**: Always check `cert.Ready` and `secret.Ready` before proceeding +4. **Requeue when not ready**: Return `RequeueAfter: 10s` when resources aren't ready yet +5. **Use structured logging**: Include relevant fields in all log statements +6. **Emit events**: Record important state changes for visibility +7. 
**Use builders**: Leverage fluent API for readable resource construction +8. **Configure hierarchically**: Use PlatformConfig for defaults, TenantConfig for overrides + +## Troubleshooting + +### Certificate not ready + +```go +if !cert.Ready { + rctx.Logger().Info("Certificate not ready", + "provider", cert.Provider, + "error", cert.Error) + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil +} +``` + +### Secret version mismatch + +```go +if secretRef.Version != nil { + rctx.Logger().V(1).Info("Using versioned secret", + "version", *secretRef.Version, + "secretName", secretRef.SecretName) +} +``` + +### Service discovery returns empty + +```go +endpoints, err := rctx.DiscoverSplunk(selector) +if len(endpoints) == 0 { + rctx.Logger().Info("No services found", + "type", selector.Type, + "namespace", selector.Namespace) +} +``` + +### Enable debug logging + +Set log level to see detailed SDK operations: +```bash +--zap-log-level=debug # V(1) logs +--zap-log-level=trace # V(2) logs +``` diff --git a/pkg/platform-sdk/api/builders/builders.go b/pkg/platform-sdk/api/builders/builders.go new file mode 100644 index 000000000..c743b1dca --- /dev/null +++ b/pkg/platform-sdk/api/builders/builders.go @@ -0,0 +1,173 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package builders provides fluent builders for Kubernetes resources. 
+package builders + +import ( + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/certificate" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/secret" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +// StatefulSetBuilder builds a StatefulSet with SDK integrations. +// +// The builder automatically: +// - Mounts secrets at /mnt/splunk-secrets +// - Mounts certificates at /mnt/tls and /mnt/ca-bundle +// - Adds observability annotations (Prometheus) +// - Sets owner references for garbage collection +// - Applies SDK conventions for labels and selectors +// +// Example: +// +// sts, err := rctx.BuildStatefulSet(). +// WithName("postgres"). +// WithNamespace("default"). +// WithReplicas(1). +// WithImage("postgres:15"). +// WithSecret(secret). +// WithCertificate(cert). +// WithObservability(). +// Build() +type StatefulSetBuilder interface { + WithName(name string) StatefulSetBuilder + WithNamespace(namespace string) StatefulSetBuilder + WithReplicas(replicas int32) StatefulSetBuilder + WithImage(image string) StatefulSetBuilder + WithPorts(ports []corev1.ContainerPort) StatefulSetBuilder + WithCommand(command []string) StatefulSetBuilder + WithArgs(args []string) StatefulSetBuilder + + // Automatically mounts secrets and certificates + WithSecret(ref *secret.Ref) StatefulSetBuilder + WithCertificate(ref *certificate.Ref) StatefulSetBuilder + WithConfigMap(name string) StatefulSetBuilder + + // Adds observability annotations + WithObservability() StatefulSetBuilder + + // Environment variables + WithEnv(env corev1.EnvVar) StatefulSetBuilder + WithEnvFrom(envFrom corev1.EnvFromSource) StatefulSetBuilder + + // Additional customization + WithVolume(volume corev1.Volume) StatefulSetBuilder + WithVolumeMount(mount corev1.VolumeMount) StatefulSetBuilder + WithResources(resources corev1.ResourceRequirements) StatefulSetBuilder + WithLabels(labels map[string]string) StatefulSetBuilder + WithAnnotations(annotations map[string]string) 
StatefulSetBuilder + + // Security contexts + WithPodSecurityContext(psc *corev1.PodSecurityContext) StatefulSetBuilder + WithSecurityContext(sc *corev1.SecurityContext) StatefulSetBuilder + + // Probes + WithLivenessProbe(probe *corev1.Probe) StatefulSetBuilder + WithReadinessProbe(probe *corev1.Probe) StatefulSetBuilder + WithStartupProbe(probe *corev1.Probe) StatefulSetBuilder + + // Scheduling + WithAffinity(affinity *corev1.Affinity) StatefulSetBuilder + + // StatefulSet-specific + WithServiceName(name string) StatefulSetBuilder + WithVolumeClaimTemplate(pvc corev1.PersistentVolumeClaim) StatefulSetBuilder + WithUpdateStrategy(strategy appsv1.StatefulSetUpdateStrategy) StatefulSetBuilder + WithPVCRetentionPolicy(policy *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy) StatefulSetBuilder + WithImagePullPolicy(policy corev1.PullPolicy) StatefulSetBuilder + WithTerminationGracePeriodSeconds(seconds int64) StatefulSetBuilder + WithPriorityClassName(className string) StatefulSetBuilder + + // Build the final StatefulSet + Build() (*appsv1.StatefulSet, error) +} + +// ServiceBuilder builds a Service with SDK integrations. +type ServiceBuilder interface { + WithName(name string) ServiceBuilder + WithNamespace(namespace string) ServiceBuilder + WithType(serviceType corev1.ServiceType) ServiceBuilder + WithPorts(ports []corev1.ServicePort) ServiceBuilder + WithSelector(selector map[string]string) ServiceBuilder + WithLabels(labels map[string]string) ServiceBuilder + WithAnnotations(annotations map[string]string) ServiceBuilder + + // Adds discovery labels for DiscoveryService + WithDiscoveryLabels() ServiceBuilder + + Build() (*corev1.Service, error) +} + +// ConfigMapBuilder builds a ConfigMap with SDK integrations. 
+type ConfigMapBuilder interface { + WithName(name string) ConfigMapBuilder + WithNamespace(namespace string) ConfigMapBuilder + WithData(data map[string]string) ConfigMapBuilder + WithBinaryData(binaryData map[string][]byte) ConfigMapBuilder + WithLabels(labels map[string]string) ConfigMapBuilder + WithAnnotations(annotations map[string]string) ConfigMapBuilder + + Build() (*corev1.ConfigMap, error) +} + +// PodBuilder builds Pod specifications. +// Useful for building pod specs that are shared across resources. +type PodBuilder interface { + WithContainers(containers []corev1.Container) PodBuilder + WithInitContainers(initContainers []corev1.Container) PodBuilder + WithVolumes(volumes []corev1.Volume) PodBuilder + WithServiceAccountName(serviceAccountName string) PodBuilder + WithSecurityContext(securityContext *corev1.PodSecurityContext) PodBuilder + WithAffinity(affinity *corev1.Affinity) PodBuilder + WithTolerations(tolerations []corev1.Toleration) PodBuilder + WithNodeSelector(nodeSelector map[string]string) PodBuilder + WithLabels(labels map[string]string) PodBuilder + WithAnnotations(annotations map[string]string) PodBuilder + + Build() (*corev1.PodSpec, error) +} + +// DeploymentBuilder builds a Deployment with SDK integrations. 
+type DeploymentBuilder interface { + WithName(name string) DeploymentBuilder + WithNamespace(namespace string) DeploymentBuilder + WithReplicas(replicas int32) DeploymentBuilder + WithImage(image string) DeploymentBuilder + WithPorts(ports []corev1.ContainerPort) DeploymentBuilder + WithCommand(command []string) DeploymentBuilder + WithArgs(args []string) DeploymentBuilder + + // Automatically mounts secrets and certificates + WithSecret(ref *secret.Ref) DeploymentBuilder + WithCertificate(ref *certificate.Ref) DeploymentBuilder + WithConfigMap(name string) DeploymentBuilder + + // Adds observability annotations + WithObservability() DeploymentBuilder + + // Environment variables + WithEnv(env corev1.EnvVar) DeploymentBuilder + WithEnvFrom(envFrom corev1.EnvFromSource) DeploymentBuilder + + // Additional customization + WithVolume(volume corev1.Volume) DeploymentBuilder + WithVolumeMount(mount corev1.VolumeMount) DeploymentBuilder + WithResources(resources corev1.ResourceRequirements) DeploymentBuilder + WithLabels(labels map[string]string) DeploymentBuilder + WithAnnotations(annotations map[string]string) DeploymentBuilder + + Build() (*appsv1.Deployment, error) +} diff --git a/pkg/platform-sdk/api/certificate/types.go b/pkg/platform-sdk/api/certificate/types.go new file mode 100644 index 000000000..190c17326 --- /dev/null +++ b/pkg/platform-sdk/api/certificate/types.go @@ -0,0 +1,205 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package certificate provides types for certificate resolution. +package certificate + +import ( + "time" +) + +// Binding describes what certificate you need. +// +// The SDK uses this to provision a TLS certificate via cert-manager +// (if available) or by generating a self-signed certificate. +// +// Example: +// +// cert, err := rctx.ResolveCertificate(certificate.Binding{ +// Name: "postgres-tls", +// DNSNames: []string{ +// "postgres.default.svc", +// "postgres.default.svc.cluster.local", +// "*.postgres.default.svc.cluster.local", +// }, +// Duration: ptrDuration(90 * 24 * time.Hour), +// }) +type Binding struct { + // Name for the certificate (becomes the Certificate CR name and Secret name). + // Required. + Name string + + // Namespace where the certificate should be created. + // If empty, defaults to the namespace from ReconcileContext. + Namespace string + + // DNSNames to include in the certificate. + // Required. Must have at least one DNS name. + // + // Example for a service: + // - "my-service.my-namespace.svc" + // - "my-service.my-namespace.svc.cluster.local" + // + // Example for a StatefulSet (include wildcard for pods): + // - "*.my-service.my-namespace.svc.cluster.local" + DNSNames []string + + // IPAddresses to include in the certificate (optional). + // Use this if you need the certificate to be valid for specific IP addresses. + IPAddresses []string + + // Duration before the certificate expires (optional). + // If not specified, uses the default from PlatformConfig (typically 90 days). + Duration *time.Duration + + // RenewBefore specifies how long before expiry to renew (optional). + // If not specified, uses the default from PlatformConfig (typically 30 days). + RenewBefore *time.Duration + + // Usages specifies the key usages for the certificate (optional). 
+ // If not specified, defaults to: digital signature, key encipherment, server auth + Usages []string + + // IssuerRef overrides the certificate issuer from PlatformConfig (optional). + // Only used if you want a specific issuer for this certificate. + IssuerRef *IssuerRef + + // OwnerName is the name of the resource that owns this certificate (for cleanup). + // Automatically set by ReconcileContext. + OwnerName string + + // CRSpec is the certificate spec from the CR (for hierarchy resolution). + // Automatically set by ReconcileContext if the CR has a certificate spec. + CRSpec *Spec +} + +// IssuerRef references a cert-manager Issuer or ClusterIssuer. +type IssuerRef struct { + // Name of the Issuer or ClusterIssuer. + Name string + + // Kind is either "Issuer" or "ClusterIssuer". + // Defaults to "ClusterIssuer". + Kind string + + // Group is the API group (defaults to "cert-manager.io"). + Group string +} + +// Spec is the certificate specification from a CR. +// Feature controllers can include this in their CRs to allow users +// to override certificate settings. +type Spec struct { + // IssuerRef specifies which cert-manager issuer to use. + IssuerRef *IssuerRef + + // Duration for the certificate. + Duration *time.Duration + + // RenewBefore specifies when to renew. + RenewBefore *time.Duration + + // Usages for the certificate. + Usages []string +} + +// Ref is what you get back from ResolveCertificate - a reference to the certificate. 
+// +// The Ref tells you: +// - Where the certificate secret is (SecretName, Namespace) +// - Whether it's ready to use (Ready) +// - How it was provisioned (Provider: "cert-manager" or "self-signed") +// - Any error if it's not ready (Error) +// +// Example usage: +// +// cert, err := rctx.ResolveCertificate(binding) +// if err != nil { +// return ctrl.Result{}, err +// } +// +// if !cert.Ready { +// log.Info("Certificate not ready", "error", cert.Error) +// return ctrl.Result{RequeueAfter: 10 * time.Second}, nil +// } +// +// // Use cert.SecretName in your StatefulSet +// sts := rctx.BuildStatefulSet(). +// WithCertificate(cert). +// Build() +type Ref struct { + // SecretName is the Kubernetes secret containing the certificate. + // The secret has type kubernetes.io/tls and contains: + // - tls.crt: The certificate + // - tls.key: The private key + // - ca.crt: The CA certificate + SecretName string + + // Namespace where the secret is located. + Namespace string + + // Ready indicates if the certificate is issued and ready to use. + // + // Ready=true means the secret exists and contains a valid certificate. + // Ready=false means the certificate is being issued (cert-manager is working on it) + // or there's an error (check the Error field). + // + // When Ready=false, requeue after 10 seconds to check again. + Ready bool + + // Provider indicates how this certificate was provisioned. + // - "cert-manager": Provisioned by cert-manager + // - "self-signed": Generated as a self-signed certificate + Provider string + + // Error contains any error message if Ready is false. + // Examples: + // - "Certificate is being issued by cert-manager" + // - "cert-manager Issuer 'enterprise-ca' not found" + // - "Certificate failed issuance: rate limit exceeded" + Error string + + // NotBefore is when the certificate becomes valid. + NotBefore *time.Time + + // NotAfter is when the certificate expires. 
+ NotAfter *time.Time + + // RenewalTime is when the certificate will be renewed. + RenewalTime *time.Time + + // MountPath is where the certificate should be mounted in the container. + // If not specified, defaults to /etc/certs/ + // + // For cert-manager compatibility, common paths are: + // - /mnt/tls - for TLS certificates (tls.crt, tls.key, ca.crt) + // - /mnt/ca-bundles - for CA bundle certificates + MountPath string +} + +// IsExpired returns true if the certificate has expired. +func (r *Ref) IsExpired() bool { + if r.NotAfter == nil { + return false + } + return time.Now().After(*r.NotAfter) +} + +// IsNearExpiry returns true if the certificate is within the renewal window. +func (r *Ref) IsNearExpiry() bool { + if r.RenewalTime == nil { + return false + } + return time.Now().After(*r.RenewalTime) +} diff --git a/pkg/platform-sdk/api/config/conversion.go b/pkg/platform-sdk/api/config/conversion.go new file mode 100644 index 000000000..9f6615f99 --- /dev/null +++ b/pkg/platform-sdk/api/config/conversion.go @@ -0,0 +1,216 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + platformv4 "github.com/splunk/splunk-operator/api/platform/v4" +) + +// FromAPIType converts from the public API type to internal SDK type. +// This allows the SDK to remain independent while using the operator-generated API. 
// FromAPIType converts from the public API type to internal SDK type.
// This allows the SDK to remain independent while using the operator-generated API.
//
// A nil input returns nil rather than an empty config.
//
// NOTE(review): only Spec.Secrets is converted here; the SDK spec's
// Certificates, Observability, Discovery and Backup sections are left at
// their zero values, whereas TenantConfigFromAPIType additionally converts
// Certificates. Confirm whether this asymmetry is intentional or whether
// the API PlatformConfigSpec simply lacks those fields.
func FromAPIType(apiConfig *platformv4.PlatformConfig) *PlatformConfig {
	if apiConfig == nil {
		return nil
	}

	sdkConfig := &PlatformConfig{
		TypeMeta: apiConfig.TypeMeta,
		// ObjectMeta is copied by assignment; maps/slices inside it
		// (labels, annotations, owner refs) remain shared with the input.
		ObjectMeta: apiConfig.ObjectMeta,
		Spec: PlatformConfigSpec{
			Secrets: convertSecretsFromAPI(apiConfig.Spec.Secrets),
		},
		Status: PlatformConfigStatus{
			// The Conditions slice is shared with the input, not deep-copied.
			Conditions:         apiConfig.Status.Conditions,
			ObservedGeneration: apiConfig.Status.ObservedGeneration,
		},
	}

	return sdkConfig
}

// ToAPIType converts from internal SDK type to the public API type.
// Mirror of FromAPIType: nil in, nil out; ObjectMeta and status Conditions
// are shallow-shared with the input.
func ToAPIType(sdkConfig *PlatformConfig) *platformv4.PlatformConfig {
	if sdkConfig == nil {
		return nil
	}

	apiConfig := &platformv4.PlatformConfig{
		TypeMeta:   sdkConfig.TypeMeta,
		ObjectMeta: sdkConfig.ObjectMeta,
		Spec: platformv4.PlatformConfigSpec{
			Secrets: convertSecretsToAPI(sdkConfig.Spec.Secrets),
		},
		Status: platformv4.PlatformConfigStatus{
			Conditions:         sdkConfig.Status.Conditions,
			ObservedGeneration: sdkConfig.Status.ObservedGeneration,
		},
	}

	return apiConfig
}

// convertSecretsFromAPI converts API SecretConfig to SDK SecretConfig.
+func convertSecretsFromAPI(apiSecrets platformv4.SecretConfig) SecretConfig { + sdkSecrets := SecretConfig{ + Provider: apiSecrets.Provider, + GenerateFallback: apiSecrets.GenerateFallback, + VersioningEnabled: apiSecrets.VersioningEnabled, + VersionsToKeep: apiSecrets.VersionsToKeep, + } + + // Convert CSI config if present + if apiSecrets.CSI != nil { + sdkSecrets.CSI = &CSISecretConfig{ + Driver: apiSecrets.CSI.Driver, + DefaultProvider: apiSecrets.CSI.DefaultProvider, + MountPath: apiSecrets.CSI.MountPath, + } + + // Convert naming config + if apiSecrets.CSI.Naming != nil { + sdkSecrets.CSI.Naming = &SecretNamingConfig{ + Pattern: apiSecrets.CSI.Naming.Pattern, + } + } + + // Convert Vault config + if apiSecrets.CSI.Vault != nil { + sdkSecrets.CSI.Vault = &VaultConfig{ + Address: apiSecrets.CSI.Vault.Address, + Role: apiSecrets.CSI.Vault.Role, + } + } + + // Convert AWS config + if apiSecrets.CSI.AWS != nil { + sdkSecrets.CSI.AWS = &AWSSecretsConfig{ + Region: apiSecrets.CSI.AWS.Region, + } + } + } + + return sdkSecrets +} + +// convertSecretsToAPI converts SDK SecretConfig to API SecretConfig. 
+func convertSecretsToAPI(sdkSecrets SecretConfig) platformv4.SecretConfig { + apiSecrets := platformv4.SecretConfig{ + Provider: sdkSecrets.Provider, + GenerateFallback: sdkSecrets.GenerateFallback, + VersioningEnabled: sdkSecrets.VersioningEnabled, + VersionsToKeep: sdkSecrets.VersionsToKeep, + } + + // Convert CSI config if present + if sdkSecrets.CSI != nil { + apiSecrets.CSI = &platformv4.CSISecretConfig{ + Driver: sdkSecrets.CSI.Driver, + DefaultProvider: sdkSecrets.CSI.DefaultProvider, + MountPath: sdkSecrets.CSI.MountPath, + } + + // Convert naming config + if sdkSecrets.CSI.Naming != nil { + apiSecrets.CSI.Naming = &platformv4.SecretNamingConfig{ + Pattern: sdkSecrets.CSI.Naming.Pattern, + } + } + + // Convert Vault config + if sdkSecrets.CSI.Vault != nil { + apiSecrets.CSI.Vault = &platformv4.VaultConfig{ + Address: sdkSecrets.CSI.Vault.Address, + Role: sdkSecrets.CSI.Vault.Role, + } + } + + // Convert AWS config + if sdkSecrets.CSI.AWS != nil { + apiSecrets.CSI.AWS = &platformv4.AWSSecretsConfig{ + Region: sdkSecrets.CSI.AWS.Region, + } + } + } + + return apiSecrets +} + +// TenantConfigFromAPIType converts from the public API TenantConfig type to internal SDK type. +func TenantConfigFromAPIType(apiConfig *platformv4.TenantConfig) *TenantConfig { + if apiConfig == nil { + return nil + } + + sdkConfig := &TenantConfig{ + TypeMeta: apiConfig.TypeMeta, + ObjectMeta: apiConfig.ObjectMeta, + Spec: TenantConfigSpec{ + Secrets: convertSecretsFromAPI(apiConfig.Spec.Secrets), + Certificates: convertCertificatesFromAPI(apiConfig.Spec.Certificates), + }, + Status: TenantConfigStatus{ + Conditions: apiConfig.Status.Conditions, + ObservedGeneration: apiConfig.Status.ObservedGeneration, + }, + } + + return sdkConfig +} + +// TenantConfigToAPIType converts from internal SDK TenantConfig type to the public API type. 
+func TenantConfigToAPIType(sdkConfig *TenantConfig) *platformv4.TenantConfig { + if sdkConfig == nil { + return nil + } + + apiConfig := &platformv4.TenantConfig{ + TypeMeta: sdkConfig.TypeMeta, + ObjectMeta: sdkConfig.ObjectMeta, + Spec: platformv4.TenantConfigSpec{ + Secrets: convertSecretsToAPI(sdkConfig.Spec.Secrets), + Certificates: convertCertificatesToAPI(sdkConfig.Spec.Certificates), + }, + Status: platformv4.TenantConfigStatus{ + Conditions: sdkConfig.Status.Conditions, + ObservedGeneration: sdkConfig.Status.ObservedGeneration, + }, + } + + return apiConfig +} + +// convertCertificatesFromAPI converts API CertificatesConfig to SDK CertificateConfig. +func convertCertificatesFromAPI(apiCerts platformv4.CertificatesConfig) CertificateConfig { + sdkCerts := CertificateConfig{ + Provider: apiCerts.Provider, + } + + // Note: API has CertManager and UserProvided configs but SDK has IssuerRef and different structure + // For now, just copy the provider. Full implementation would need more mapping. + + return sdkCerts +} + +// convertCertificatesToAPI converts SDK CertificateConfig to API CertificatesConfig. +func convertCertificatesToAPI(sdkCerts CertificateConfig) platformv4.CertificatesConfig { + apiCerts := platformv4.CertificatesConfig{ + Provider: sdkCerts.Provider, + } + + // Note: SDK has IssuerRef but API has CertManager config. + // For now, just copy the provider. Full implementation would need more mapping. + + return apiCerts +} diff --git a/pkg/platform-sdk/api/config/groupversion_info.go b/pkg/platform-sdk/api/config/groupversion_info.go new file mode 100644 index 000000000..7a528725d --- /dev/null +++ b/pkg/platform-sdk/api/config/groupversion_info.go @@ -0,0 +1,34 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package config contains internal SDK types for the platform.splunk.com API group +// NOTE: This package is for internal SDK use only. The public API is in api/platform/v4. +// Kubebuilder code generation is disabled for this package. +package config + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "platform.splunk.com", Version: "config"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/pkg/platform-sdk/api/config/types.go b/pkg/platform-sdk/api/config/types.go new file mode 100644 index 000000000..3f53a55ee --- /dev/null +++ b/pkg/platform-sdk/api/config/types.go @@ -0,0 +1,384 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package config provides configuration types for the Platform SDK. +package config + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PlatformConfig defines cluster-wide defaults for the Platform SDK. +// +// The platform admin creates one PlatformConfig to set defaults for +// certificate issuers, secret stores, observability, etc. +// +// NOTE: This is the internal SDK type. The API type is in api/platform/v4. +// This type is used for conversion and internal SDK operations. +type PlatformConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PlatformConfigSpec `json:"spec,omitempty"` + Status PlatformConfigStatus `json:"status,omitempty"` +} + +// PlatformConfigSpec defines the desired configuration for the platform. +type PlatformConfigSpec struct { + // Certificates configuration for cert-manager integration. + Certificates CertificateConfig `json:"certificates,omitempty"` + + // Secrets configuration for secret management. + Secrets SecretConfig `json:"secrets,omitempty"` + + // Observability configuration for monitoring and tracing. + Observability ObservabilityConfig `json:"observability,omitempty"` + + // Discovery configuration for service discovery. + Discovery DiscoveryConfig `json:"discovery,omitempty"` + + // Backup configuration for backup and restore. + Backup BackupConfig `json:"backup,omitempty"` +} + +// PlatformConfigStatus defines the observed state of PlatformConfig. +type PlatformConfigStatus struct { + // Conditions represent the latest available observations of the config's state. + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // ObservedGeneration is the generation observed by the controller. 
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// TenantConfig defines namespace-specific overrides for the Platform SDK. +// +// Tenant admins create TenantConfigs to override PlatformConfig defaults +// for their namespace. This enables multi-tenancy with different settings +// per tenant. +// +// NOTE: This is the internal SDK type. The API type should be in api/platform/v4. +// This type is used for conversion and internal SDK operations. +type TenantConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec TenantConfigSpec `json:"spec,omitempty"` + Status TenantConfigStatus `json:"status,omitempty"` +} + +// TenantConfigSpec defines the desired configuration for a tenant. +type TenantConfigSpec struct { + // Certificates configuration overrides. + Certificates CertificateConfig `json:"certificates,omitempty"` + + // Secrets configuration overrides. + Secrets SecretConfig `json:"secrets,omitempty"` + + // Observability configuration overrides. + Observability ObservabilityConfig `json:"observability,omitempty"` + + // Discovery configuration overrides. + Discovery DiscoveryConfig `json:"discovery,omitempty"` + + // Backup configuration overrides. + Backup BackupConfig `json:"backup,omitempty"` +} + +// TenantConfigStatus defines the observed state of TenantConfig. +type TenantConfigStatus struct { + // Conditions represent the latest available observations of the config's state. + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // ObservedGeneration is the generation observed by the controller. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// CertificateConfig configures certificate management. +type CertificateConfig struct { + // Provider specifies the certificate provider to use. 
+ // Valid values: "cert-manager", "self-signed" + // Default: "self-signed" + Provider string `json:"provider,omitempty"` + + // IssuerRef references the cert-manager Issuer or ClusterIssuer. + // Only used when Provider is "cert-manager". + IssuerRef *IssuerRef `json:"issuerRef,omitempty"` + + // Duration is the default certificate duration. + // Default: 2160h (90 days) + Duration *metav1.Duration `json:"duration,omitempty"` + + // RenewBefore is when to renew before expiry. + // Default: 720h (30 days) + RenewBefore *metav1.Duration `json:"renewBefore,omitempty"` + + // Usages are the default key usages. + // Default: ["digital signature", "key encipherment", "server auth"] + Usages []string `json:"usages,omitempty"` +} + +// IssuerRef references a cert-manager Issuer or ClusterIssuer. +type IssuerRef struct { + // Name of the Issuer or ClusterIssuer. + Name string `json:"name"` + + // Kind is "Issuer" or "ClusterIssuer". + // Default: "ClusterIssuer" + Kind string `json:"kind,omitempty"` + + // Group is the API group. + // Default: "cert-manager.io" + Group string `json:"group,omitempty"` +} + +// SecretConfig configures secret management. +type SecretConfig struct { + // Provider specifies the secret provider to use. + // Valid values: "external-secrets", "kubernetes", "csi" + // Default: "kubernetes" + Provider string `json:"provider,omitempty"` + + // GenerateFallback indicates whether to generate fallback secrets. + // Only for development environments. + // Default: false + GenerateFallback bool `json:"generateFallback,omitempty"` + + // VersioningEnabled enables versioned secrets for Splunk resources. + // Default: true + VersioningEnabled bool `json:"versioningEnabled,omitempty"` + + // VersionsToKeep is how many versions to keep. + // Default: 3 + VersionsToKeep int `json:"versionsToKeep,omitempty"` + + // CSI configures CSI-based secret management. + // Only used when Provider is "csi". 
+ CSI *CSISecretConfig `json:"csi,omitempty"` +} + +// CSISecretConfig configures CSI-based secret management. +type CSISecretConfig struct { + // Driver is the CSI driver to use. + // Default: "secrets-store.csi.k8s.io" + Driver string `json:"driver,omitempty"` + + // DefaultProvider is the default CSI provider. + // Valid values: "vault", "aws", "azure", "gcp" + DefaultProvider string `json:"defaultProvider,omitempty"` + + // Naming configures SecretProviderClass naming pattern. + Naming *SecretNamingConfig `json:"naming,omitempty"` + + // MountPath is the default mount path for CSI secrets. + // Default: "/mnt/secrets" + MountPath string `json:"mountPath,omitempty"` + + // Vault configures Vault-specific settings. + // Only used when DefaultProvider is "vault". + Vault *VaultConfig `json:"vault,omitempty"` + + // AWS configures AWS Secrets Manager settings. + // Only used when DefaultProvider is "aws". + AWS *AWSSecretsConfig `json:"aws,omitempty"` +} + +// SecretNamingConfig configures secret naming pattern for CSI. +type SecretNamingConfig struct { + // Pattern is the naming pattern for SecretProviderClass resources. + // Supports variable substitution: + // - ${namespace}: Kubernetes namespace + // - ${service}: Service type (e.g., "standalone", "clustermanager") + // - ${instance}: Instance name (e.g., "my-splunk") + // Example: "${service}-${instance}-secrets" -> "standalone-my-splunk-secrets" + // Default: "${service}-${instance}-secrets" + Pattern string `json:"pattern,omitempty"` +} + +// VaultConfig configures Vault CSI provider. +type VaultConfig struct { + // Address is the Vault server address. + // Example: "https://vault.company.com" + Address string `json:"address,omitempty"` + + // Role is the Vault role to use for authentication. + Role string `json:"role,omitempty"` +} + +// AWSSecretsConfig configures AWS Secrets Manager CSI provider. +type AWSSecretsConfig struct { + // Region is the AWS region. 
+ // Example: "us-west-2" + Region string `json:"region,omitempty"` +} + +// ObservabilityConfig configures observability integration. +type ObservabilityConfig struct { + // Enabled indicates if observability is enabled. + // Default: true + Enabled bool `json:"enabled,omitempty"` + + // Provider specifies the observability provider. + // Valid values: "opentelemetry", "prometheus", "datadog" + // Default: "opentelemetry" + Provider string `json:"provider,omitempty"` + + // OTelCollectorMode specifies the OTel Collector deployment mode. + // Valid values: "sidecar", "daemonset", "deployment" + // Default: "daemonset" + OTelCollectorMode string `json:"otelCollectorMode,omitempty"` + + // PrometheusAnnotations configures Prometheus scraping. + PrometheusAnnotations PrometheusAnnotations `json:"prometheusAnnotations,omitempty"` + + // SamplingRate for traces (0.0 to 1.0). + // Default: 0.1 (10%) + // +kubebuilder:validation:Type=number + // +kubebuilder:validation:Format=double + SamplingRate float64 `json:"samplingRate,omitempty"` +} + +// PrometheusAnnotations configures Prometheus scraping annotations. +type PrometheusAnnotations struct { + // Scrape indicates if Prometheus should scrape this pod. + // Default: true + Scrape bool `json:"scrape,omitempty"` + + // Port for Prometheus metrics. + // Default: 9090 + Port int `json:"port,omitempty"` + + // Path for Prometheus metrics. + // Default: /metrics + Path string `json:"path,omitempty"` +} + +// DiscoveryConfig configures service discovery. +type DiscoveryConfig struct { + // Enabled indicates if service discovery is enabled. + // Default: true + Enabled bool `json:"enabled,omitempty"` + + // CacheTTL is the cache TTL for discovery results. + // Default: 30s + CacheTTL *metav1.Duration `json:"cacheTTL,omitempty"` + + // HealthCheckEnabled indicates if health checks are enabled. 
+ // Default: true + HealthCheckEnabled bool `json:"healthCheckEnabled,omitempty"` + + // HealthCheckTimeout is the timeout for health checks. + // Default: 5s + HealthCheckTimeout *metav1.Duration `json:"healthCheckTimeout,omitempty"` +} + +// BackupConfig configures backup and restore. +type BackupConfig struct { + // Enabled indicates if backup is enabled. + // Default: false + Enabled bool `json:"enabled,omitempty"` + + // Provider specifies the backup storage provider. + // Valid values: "s3", "azure", "gcs", "nfs" + Provider string `json:"provider,omitempty"` + + // Bucket or container name for backups. + Bucket string `json:"bucket,omitempty"` + + // Retention policy for backups. + Retention RetentionPolicy `json:"retention,omitempty"` +} + +// RetentionPolicy defines backup retention. +type RetentionPolicy struct { + // KeepLast is the number of most recent backups to keep. + // Default: 5 + KeepLast int `json:"keepLast,omitempty"` + + // KeepDaily is the number of daily backups to keep. + // Default: 7 + KeepDaily int `json:"keepDaily,omitempty"` + + // KeepWeekly is the number of weekly backups to keep. + // Default: 4 + KeepWeekly int `json:"keepWeekly,omitempty"` + + // KeepMonthly is the number of monthly backups to keep. + // Default: 12 + KeepMonthly int `json:"keepMonthly,omitempty"` +} + +// PlatformConfigList contains a list of PlatformConfig. +// +kubebuilder:object:root=true +type PlatformConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PlatformConfig `json:"items"` +} + +// TenantConfigList contains a list of TenantConfig. +// +kubebuilder:object:root=true +type TenantConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TenantConfig `json:"items"` +} + +// Helper functions for default values + +// GetCertificateDuration returns the certificate duration with defaults. 
+func (c *CertificateConfig) GetCertificateDuration() time.Duration { + if c.Duration != nil { + return c.Duration.Duration + } + return 90 * 24 * time.Hour // 90 days +} + +// GetCertificateRenewBefore returns the renew before duration with defaults. +func (c *CertificateConfig) GetCertificateRenewBefore() time.Duration { + if c.RenewBefore != nil { + return c.RenewBefore.Duration + } + return 30 * 24 * time.Hour // 30 days +} + +// GetVersionsToKeep returns the versions to keep with defaults. +func (s *SecretConfig) GetVersionsToKeep() int { + if s.VersionsToKeep > 0 { + return s.VersionsToKeep + } + return 3 +} + +// GetCacheTTL returns the cache TTL with defaults. +func (d *DiscoveryConfig) GetCacheTTL() time.Duration { + if d.CacheTTL != nil { + return d.CacheTTL.Duration + } + return 30 * time.Second +} + +// GetHealthCheckTimeout returns the health check timeout with defaults. +func (d *DiscoveryConfig) GetHealthCheckTimeout() time.Duration { + if d.HealthCheckTimeout != nil { + return d.HealthCheckTimeout.Duration + } + return 5 * time.Second +} + +func init() { + SchemeBuilder.Register(&PlatformConfig{}, &PlatformConfigList{}) + SchemeBuilder.Register(&TenantConfig{}, &TenantConfigList{}) +} diff --git a/pkg/platform-sdk/api/config/zz_generated.deepcopy.go b/pkg/platform-sdk/api/config/zz_generated.deepcopy.go new file mode 100644 index 000000000..72e494e2e --- /dev/null +++ b/pkg/platform-sdk/api/config/zz_generated.deepcopy.go @@ -0,0 +1,376 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by controller-gen. DO NOT EDIT. + +package config + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupConfig) DeepCopyInto(out *BackupConfig) { + *out = *in + out.Retention = in.Retention +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupConfig. +func (in *BackupConfig) DeepCopy() *BackupConfig { + if in == nil { + return nil + } + out := new(BackupConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateConfig) DeepCopyInto(out *CertificateConfig) { + *out = *in + if in.IssuerRef != nil { + in, out := &in.IssuerRef, &out.IssuerRef + *out = new(IssuerRef) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(v1.Duration) + **out = **in + } + if in.RenewBefore != nil { + in, out := &in.RenewBefore, &out.RenewBefore + *out = new(v1.Duration) + **out = **in + } + if in.Usages != nil { + in, out := &in.Usages, &out.Usages + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateConfig. +func (in *CertificateConfig) DeepCopy() *CertificateConfig { + if in == nil { + return nil + } + out := new(CertificateConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DiscoveryConfig) DeepCopyInto(out *DiscoveryConfig) {
	// The struct assignment copies the scalar fields; the pointer-typed
	// Duration fields below are re-allocated so in and out share nothing.
	*out = *in
	if in.CacheTTL != nil {
		in, out := &in.CacheTTL, &out.CacheTTL
		*out = new(v1.Duration)
		**out = **in
	}
	if in.HealthCheckTimeout != nil {
		in, out := &in.HealthCheckTimeout, &out.HealthCheckTimeout
		*out = new(v1.Duration)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiscoveryConfig.
func (in *DiscoveryConfig) DeepCopy() *DiscoveryConfig {
	if in == nil {
		return nil
	}
	out := new(DiscoveryConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IssuerRef) DeepCopyInto(out *IssuerRef) {
	// IssuerRef contains only string fields, so assignment is a full copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IssuerRef.
func (in *IssuerRef) DeepCopy() *IssuerRef {
	if in == nil {
		return nil
	}
	out := new(IssuerRef)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObservabilityConfig) DeepCopyInto(out *ObservabilityConfig) {
	// All fields are plain values (PrometheusAnnotations holds no pointers);
	// the explicit field assignment matches controller-gen's output style.
	*out = *in
	out.PrometheusAnnotations = in.PrometheusAnnotations
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservabilityConfig.
func (in *ObservabilityConfig) DeepCopy() *ObservabilityConfig {
	if in == nil {
		return nil
	}
	out := new(ObservabilityConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlatformConfig) DeepCopyInto(out *PlatformConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformConfig. +func (in *PlatformConfig) DeepCopy() *PlatformConfig { + if in == nil { + return nil + } + out := new(PlatformConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlatformConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformConfigList) DeepCopyInto(out *PlatformConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PlatformConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformConfigList. +func (in *PlatformConfigList) DeepCopy() *PlatformConfigList { + if in == nil { + return nil + } + out := new(PlatformConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlatformConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlatformConfigSpec) DeepCopyInto(out *PlatformConfigSpec) { + *out = *in + in.Certificates.DeepCopyInto(&out.Certificates) + out.Secrets = in.Secrets + out.Observability = in.Observability + in.Discovery.DeepCopyInto(&out.Discovery) + out.Backup = in.Backup +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformConfigSpec. +func (in *PlatformConfigSpec) DeepCopy() *PlatformConfigSpec { + if in == nil { + return nil + } + out := new(PlatformConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformConfigStatus) DeepCopyInto(out *PlatformConfigStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformConfigStatus. +func (in *PlatformConfigStatus) DeepCopy() *PlatformConfigStatus { + if in == nil { + return nil + } + out := new(PlatformConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusAnnotations) DeepCopyInto(out *PrometheusAnnotations) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusAnnotations. +func (in *PrometheusAnnotations) DeepCopy() *PrometheusAnnotations { + if in == nil { + return nil + } + out := new(PrometheusAnnotations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetentionPolicy) DeepCopyInto(out *RetentionPolicy) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicy. +func (in *RetentionPolicy) DeepCopy() *RetentionPolicy { + if in == nil { + return nil + } + out := new(RetentionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretConfig) DeepCopyInto(out *SecretConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretConfig. +func (in *SecretConfig) DeepCopy() *SecretConfig { + if in == nil { + return nil + } + out := new(SecretConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantConfig) DeepCopyInto(out *TenantConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantConfig. +func (in *TenantConfig) DeepCopy() *TenantConfig { + if in == nil { + return nil + } + out := new(TenantConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TenantConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TenantConfigList) DeepCopyInto(out *TenantConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TenantConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantConfigList. +func (in *TenantConfigList) DeepCopy() *TenantConfigList { + if in == nil { + return nil + } + out := new(TenantConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TenantConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantConfigSpec) DeepCopyInto(out *TenantConfigSpec) { + *out = *in + in.Certificates.DeepCopyInto(&out.Certificates) + out.Secrets = in.Secrets + out.Observability = in.Observability + in.Discovery.DeepCopyInto(&out.Discovery) + out.Backup = in.Backup +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantConfigSpec. +func (in *TenantConfigSpec) DeepCopy() *TenantConfigSpec { + if in == nil { + return nil + } + out := new(TenantConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantConfigStatus) DeepCopyInto(out *TenantConfigStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantConfigStatus. 
+func (in *TenantConfigStatus) DeepCopy() *TenantConfigStatus { + if in == nil { + return nil + } + out := new(TenantConfigStatus) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/platform-sdk/api/context.go b/pkg/platform-sdk/api/context.go new file mode 100644 index 000000000..4a60681b1 --- /dev/null +++ b/pkg/platform-sdk/api/context.go @@ -0,0 +1,247 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + + "github.com/go-logr/logr" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/builders" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/certificate" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/discovery" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/secret" + "k8s.io/client-go/tools/record" +) + +// ReconcileContext provides SDK capabilities for a single reconciliation. +// +// ReconcileContext is a lightweight, short-lived object that knows about +// the specific resource being reconciled (its namespace and name) and +// provides convenient access to all SDK capabilities. 
+// +// Usage: +// +// func (r *MyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +// // Create a context for this reconciliation +// rctx := r.sdkRuntime.NewReconcileContext(ctx, req.Namespace, req.Name) +// +// // Resolve a certificate +// cert, err := rctx.ResolveCertificate(certificate.Binding{ +// Name: "my-tls", +// DNSNames: []string{"my-service.default.svc"}, +// }) +// if err != nil || !cert.Ready { +// return ctrl.Result{RequeueAfter: 10 * time.Second}, err +// } +// +// // Resolve a secret +// secret, err := rctx.ResolveSecret(secret.Binding{ +// Name: "my-credentials", +// Keys: []string{"username", "password"}, +// }) +// if err != nil || !secret.Ready { +// return ctrl.Result{RequeueAfter: 10 * time.Second}, err +// } +// +// // Build a StatefulSet +// sts, err := rctx.BuildStatefulSet(). +// WithName("my-app"). +// WithSecret(secret). +// WithCertificate(cert). +// Build() +// if err != nil { +// return ctrl.Result{}, err +// } +// +// return ctrl.Result{}, nil +// } +type ReconcileContext interface { + // Context information + + // Namespace returns the namespace of the resource being reconciled. + Namespace() string + + // Name returns the name of the resource being reconciled. + Name() string + + // Context returns the Go context for this reconciliation. + Context() context.Context + + // Logger returns a logger configured with resource context. + // The logger includes namespace and name fields automatically. + Logger() logr.Logger + + // EventRecorder returns an event recorder for emitting Kubernetes events. + // Use this to record state changes that should be visible via kubectl describe. + // + // Example: + // rctx.EventRecorder().Event(obj, corev1.EventTypeNormal, "CertificateReady", "Certificate has been provisioned") + EventRecorder() record.EventRecorder + + // Certificate resolution + + // ResolveCertificate requests a TLS certificate with the specified DNS names. 
+ // + // The SDK automatically: + // - Checks if cert-manager is installed and uses it if available + // - Falls back to self-signed certificates if cert-manager is not available + // - Creates Certificate CRs (for cert-manager) or Secrets (for self-signed) + // - Watches for certificate readiness + // - Returns a Ref with the secret name and Ready status + // + // Returns: + // - Ref with Ready=true when the certificate is issued and ready + // - Ref with Ready=false when the certificate is being issued (requeue after 10s) + // - Error if there's a fatal problem (API unreachable, invalid configuration) + // + // Example: + // + // cert, err := rctx.ResolveCertificate(certificate.Binding{ + // Name: "postgres-tls", + // DNSNames: []string{"postgres.default.svc"}, + // }) + // if err != nil { + // return ctrl.Result{}, err + // } + // if !cert.Ready { + // return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + // } + ResolveCertificate(binding certificate.Binding) (*certificate.Ref, error) + + // Secret resolution + + // ResolveSecret validates that a Kubernetes secret exists and has required keys. + // + // The SDK does NOT create ExternalSecret CRs. That's the admin's job via kubectl/GitOps. + // The SDK only validates the resulting Kubernetes secret exists. + // + // For Splunk secrets (SecretTypeSplunk), the SDK manages versioned secrets + // (splunk-standalone-secret-v1, v2, v3) for safe rotation. 
+ // + // Returns: + // - Ref with Ready=true when the secret exists and has all required keys + // - Ref with Ready=false when the secret doesn't exist or is missing keys + // - Error if there's a fatal problem (API unreachable, invalid configuration) + // + // Example: + // + // secret, err := rctx.ResolveSecret(secret.Binding{ + // Name: "postgres-credentials", + // Keys: []string{"username", "password"}, + // }) + // if err != nil { + // return ctrl.Result{}, err + // } + // if !secret.Ready { + // return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + // } + ResolveSecret(binding secret.Binding) (*secret.Ref, error) + + // Service discovery + + // DiscoverSplunk finds Splunk instances (Standalone, SearchHeadCluster, IndexerCluster, etc.). + // + // The SDK searches for: + // - SOK-managed Splunk CRs (in Kubernetes) + // - ExternalSplunkCluster CRs (pointing to Splunk on VMs/external systems) + // + // Results are sorted by health (healthy endpoints first) and cached for 30 seconds. + // + // Example: + // + // endpoints, err := rctx.DiscoverSplunk(discovery.SplunkSelector{ + // Type: discovery.SplunkTypeSearchHeadCluster, + // IncludeExternal: true, + // }) + // if err != nil { + // return ctrl.Result{}, err + // } + // if len(endpoints) == 0 { + // return ctrl.Result{RequeueAfter: 30 * time.Second}, nil + // } + // splunkURL := endpoints[0].URL + DiscoverSplunk(selector discovery.SplunkSelector) ([]discovery.SplunkEndpoint, error) + + // Discover finds generic Kubernetes services by labels. + // + // Use this for discovering services that aren't Splunk instances + // (databases, message queues, etc.). 
+ // + // Example: + // + // endpoints, err := rctx.Discover(discovery.Selector{ + // Labels: map[string]string{ + // "app": "postgresql", + // }, + // }) + // if err != nil { + // return ctrl.Result{}, err + // } + Discover(selector discovery.Selector) ([]discovery.Endpoint, error) + + // Configuration resolution + + // ResolveConfig reads configuration with proper hierarchy. + // + // Hierarchy: CR spec > TenantConfig > PlatformConfig > Built-in defaults + // + // Key examples: "certificates.issuer", "secrets.store", "observability.enabled" + // + // Most of the time, you don't need to call this directly. The SDK's + // ResolveCertificate and ResolveSecret methods handle configuration internally. + ResolveConfig(key string) (interface{}, error) + + // Resource builders + + // BuildStatefulSet creates a fluent builder for StatefulSets. + // + // The builder automatically: + // - Mounts secrets at /mnt/splunk-secrets + // - Mounts certificates at /mnt/tls and /mnt/ca-bundle + // - Adds observability annotations (Prometheus) + // - Sets owner references for garbage collection + // - Applies SDK conventions for labels and selectors + // + // Example: + // + // sts, err := rctx.BuildStatefulSet(). + // WithName("postgres"). + // WithReplicas(1). + // WithImage("postgres:15"). + // WithSecret(secret). + // WithCertificate(cert). + // WithObservability(). + // Build() + BuildStatefulSet() builders.StatefulSetBuilder + + // BuildService creates a fluent builder for Services. + // + // The builder automatically: + // - Adds discovery labels for DiscoveryService + // - Sets owner references + // - Applies SDK conventions + BuildService() builders.ServiceBuilder + + // BuildConfigMap creates a fluent builder for ConfigMaps. + BuildConfigMap() builders.ConfigMapBuilder + + // BuildPod creates a fluent builder for Pod specifications. + // Useful for building pod specs that are shared across resources. 
+ BuildPod() builders.PodBuilder + + // BuildDeployment creates a fluent builder for Deployments. + // Use this for stateless workloads. + BuildDeployment() builders.DeploymentBuilder +} diff --git a/pkg/platform-sdk/api/discovery/types.go b/pkg/platform-sdk/api/discovery/types.go new file mode 100644 index 000000000..7f803e262 --- /dev/null +++ b/pkg/platform-sdk/api/discovery/types.go @@ -0,0 +1,114 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package discovery provides types for service discovery. +package discovery + +import "time" + +// SplunkSelector specifies criteria for discovering Splunk instances. +type SplunkSelector struct { + // Type of Splunk instance to find. + Type SplunkType + + // Namespace to search (empty = all namespaces in cluster-scoped mode). + Namespace string + + // Labels to match (optional, additional filtering). + Labels map[string]string + + // IncludeExternal includes ExternalSplunkCluster resources. + IncludeExternal bool +} + +// SplunkType identifies the type of Splunk instance. 
+type SplunkType string + +const ( + SplunkTypeStandalone SplunkType = "Standalone" + SplunkTypeSearchHeadCluster SplunkType = "SearchHeadCluster" + SplunkTypeIndexerCluster SplunkType = "IndexerCluster" + SplunkTypeClusterManager SplunkType = "ClusterManager" + SplunkTypeLicenseManager SplunkType = "LicenseManager" + SplunkTypeMonitoringConsole SplunkType = "MonitoringConsole" +) + +// SplunkEndpoint represents a discovered Splunk instance. +type SplunkEndpoint struct { + // Name of the Splunk instance. + Name string + + // Type of Splunk instance. + Type SplunkType + + // URL to connect (e.g., https://splunk-shc.default.svc:8089). + URL string + + // IsExternal indicates if this is an external (non-Kubernetes) instance. + IsExternal bool + + // Namespace (for Kubernetes instances). + Namespace string + + // Health status. + Health *HealthStatus + + // Labels on the resource. + Labels map[string]string +} + +// Selector specifies criteria for discovering generic Kubernetes services. +type Selector struct { + // Namespace to search (empty = all namespaces). + Namespace string + + // Labels to match. + Labels map[string]string + + // ServiceType to filter (optional). + ServiceType string +} + +// Endpoint represents a discovered Kubernetes service. +type Endpoint struct { + // Name of the service. + Name string + + // URL to connect. + URL string + + // Namespace. + Namespace string + + // Labels on the service. + Labels map[string]string + + // Health status. + Health *HealthStatus +} + +// HealthStatus indicates the health of an endpoint. +type HealthStatus struct { + // Healthy indicates if the service is reachable and responding. + Healthy bool + + // LastChecked timestamp. + LastChecked time.Time + + // Error message if not healthy. + Error string + + // ResponseTime for the health check. 
+ ResponseTime time.Duration +} diff --git a/pkg/platform-sdk/api/doc.go b/pkg/platform-sdk/api/doc.go new file mode 100644 index 000000000..f07915d80 --- /dev/null +++ b/pkg/platform-sdk/api/doc.go @@ -0,0 +1,150 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package api defines the public interfaces for the Platform SDK. + +The Platform SDK provides stable interfaces for Kubernetes controllers to: +- Resolve TLS certificates via cert-manager or self-signed fallback +- Validate secrets from External Secrets Operator or Kubernetes +- Discover Splunk instances and other services dynamically +- Build Kubernetes resources with best practices + +# Quick Start + +Create a Runtime instance in your controller: + + func (r *MyReconciler) SetupWithManager(mgr ctrl.Manager) error { + runtime, err := sdk.NewRuntime(mgr.GetClient(), + sdk.WithClusterScoped(), + sdk.WithLogger(log), + ) + if err != nil { + return err + } + + if err := runtime.Start(context.Background()); err != nil { + return err + } + + r.sdkRuntime = runtime + return ctrl.NewControllerManagedBy(mgr).For(&v1.MyResource{}).Complete(r) + } + +Use the SDK in your Reconcile method: + + func (r *MyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + rctx := r.sdkRuntime.NewReconcileContext(ctx, req.Namespace, req.Name) + + // Resolve a certificate + cert, err := 
rctx.ResolveCertificate(certificate.Binding{ + Name: "my-tls", + DNSNames: []string{"my-service.default.svc"}, + }) + if err != nil || !cert.Ready { + return ctrl.Result{RequeueAfter: 10 * time.Second}, err + } + + // Resolve a secret + secret, err := rctx.ResolveSecret(secret.Binding{ + Name: "my-credentials", + Keys: []string{"username", "password"}, + }) + if err != nil || !secret.Ready { + return ctrl.Result{RequeueAfter: 10 * time.Second}, err + } + + // Build a StatefulSet + sts, err := rctx.BuildStatefulSet(). + WithName("my-app"). + WithImage("my-app:latest"). + WithSecret(secret). + WithCertificate(cert). + WithObservability(). + Build() + if err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{}, r.Create(ctx, sts) + } + +# Package Structure + + api/ - Public interfaces (this package) + certificate/ - Certificate types + secret/ - Secret types + discovery/ - Service discovery types + builders/ - Resource builder interfaces + internal/ - Internal implementations (not exported) + services/ - Service layer (resolvers, discovery, etc.) + providers/ - Provider implementations (cert-manager, ESO, etc.) + pkg/ - Shared utilities (cache, registry, etc.) + +# Design Patterns + +The SDK uses several key design patterns: + +Factory Pattern: Runtime creates ReconcileContext, ReconcileContext creates Builders +Builder Pattern: Fluent API with method chaining for resource construction +Provider Pattern: Pluggable implementations (CertManagerProvider, SelfSignedProvider) +Singleton Pattern: Runtime singleton per operator +Dependency Injection: Constructor and context injection + +# Key Concepts + +Resource Resolution: The SDK uses a "Ready" pattern. Methods return a Ref with a Ready field. +Ready=true means the resource is available. Ready=false means "come back later" (requeue). + +Configuration Hierarchy: CR spec > TenantConfig > PlatformConfig > Built-in defaults. +The SDK automatically resolves configuration from multiple sources. 
+ +Service Discovery: The SDK caches discovery results for 30 seconds and performs health checks. + +Observability: The SDK automatically adds Prometheus annotations when requested. + +# Error Handling + +The SDK distinguishes between fatal errors and "not ready yet": + + resource, err := rctx.ResolveSomething(binding) + if err != nil { + // Fatal error - API is down, SDK is broken, etc. + return ctrl.Result{}, err + } + + if !resource.Ready { + // NOT an error! Just not ready yet. Check resource.Error for details. + log.Info("Resource pending", "reason", resource.Error) + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + + // Resource is ready, use it + +# Testing + +The SDK interfaces are designed for testing with mocks: + + type MockRuntime struct { + Certificates map[string]*certificate.Ref + Secrets map[string]*secret.Ref + } + + func (m *MockRuntime) NewReconcileContext(ctx, namespace, name) ReconcileContext { + return &MockReconcileContext{runtime: m, namespace: namespace, name: name} + } + +See the SDK documentation for complete examples and patterns. +*/ +package api diff --git a/pkg/platform-sdk/api/events.go b/pkg/platform-sdk/api/events.go new file mode 100644 index 000000000..4f66982ca --- /dev/null +++ b/pkg/platform-sdk/api/events.go @@ -0,0 +1,79 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +// Event reasons for Platform SDK operations. 
+// These constants are used with EventRecorder to emit Kubernetes events +// that are visible via kubectl describe. +// +// Usage: +// +// rctx.EventRecorder().Event(obj, corev1.EventTypeNormal, EventReasonCertificateProvisioned, +// "Certificate my-tls has been provisioned by cert-manager") +const ( + // Certificate events + EventReasonCertificateProvisioned = "CertificateProvisioned" + EventReasonCertificateReady = "CertificateReady" + EventReasonCertificateFailed = "CertificateFailed" + EventReasonCertificateRenewing = "CertificateRenewing" + EventReasonSelfSignedCertCreated = "SelfSignedCertCreated" + + // Secret events + EventReasonSecretValidated = "SecretValidated" + EventReasonSecretRotated = "SecretRotated" + EventReasonSecretVersionCreated = "SecretVersionCreated" + EventReasonSecretMissing = "SecretMissing" + EventReasonSecretInvalid = "SecretInvalid" + + // Discovery events + EventReasonServiceDiscovered = "ServiceDiscovered" + EventReasonServiceUnavailable = "ServiceUnavailable" + + // Observability events + EventReasonObservabilityEnabled = "ObservabilityEnabled" + EventReasonObservabilityFailed = "ObservabilityFailed" + + // Configuration events + EventReasonConfigLoaded = "ConfigLoaded" + EventReasonConfigInvalid = "ConfigInvalid" + EventReasonConfigChanged = "ConfigChanged" +) + +// Log levels for Platform SDK operations. 
+// The SDK uses logr which supports V-levels for log verbosity: +// +// V(0) - Info level: Important state changes, errors +// V(1) - Debug level: Detailed operation logs, cache hits/misses +// V(2) - Trace level: Very detailed logs including all API calls +// +// Usage in services: +// +// logger.Info("Certificate provisioned", "name", cert.Name) // Always logged +// logger.V(1).Info("Using cached certificate", "name", cert.Name) // Only in debug mode +// logger.V(2).Info("Calling cert-manager API", "endpoint", url) // Only in trace mode +// logger.Error(err, "Failed to provision certificate", "name", cert.Name) // Always logged +const ( + // LogLevelInfo (V=0) logs important state changes and errors. + // These logs are always visible and should be used sparingly. + LogLevelInfo = 0 + + // LogLevelDebug (V=1) logs detailed operation information. + // Enable with --zap-log-level=debug or --v=1 + LogLevelDebug = 1 + + // LogLevelTrace (V=2) logs very detailed information including all API calls. + // Enable with --zap-log-level=trace or --v=2 + LogLevelTrace = 2 +) diff --git a/pkg/platform-sdk/api/runtime.go b/pkg/platform-sdk/api/runtime.go new file mode 100644 index 000000000..d63b2c33c --- /dev/null +++ b/pkg/platform-sdk/api/runtime.go @@ -0,0 +1,169 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package api defines the public interfaces for the Platform SDK. 
+// These interfaces provide stable contracts for feature controllers to use +// certificates, secrets, service discovery, and resource builders. +package api + +import ( + "context" + + "github.com/go-logr/logr" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Runtime is the main SDK entry point. +// Create one instance per controller and reuse it across reconciliations. +// +// The Runtime manages the lifecycle of all SDK services (resolvers, providers, +// discovery, observability) and provides ReconcileContext objects for each +// reconciliation. +// +// Usage: +// +// runtime, err := sdk.NewRuntime(mgr.GetClient(), +// sdk.WithClusterScoped(), +// sdk.WithLogger(log), +// ) +// if err != nil { +// return err +// } +// +// if err := runtime.Start(ctx); err != nil { +// return err +// } +// +// // Use runtime across reconciliations +// rctx := runtime.NewReconcileContext(ctx, namespace, name) +type Runtime interface { + // NewReconcileContext creates a context for a single reconciliation. + // Call this at the start of each Reconcile() invocation. + // + // The ReconcileContext is lightweight and short-lived - one per reconciliation. + // It provides access to all SDK capabilities with the resource's context. + NewReconcileContext(ctx context.Context, namespace, name string) ReconcileContext + + // Start initializes the SDK runtime. + // Call this in controller's SetupWithManager or main(). + // + // Start performs: + // - Initializes all services (resolvers, providers, discovery) + // - Starts background watchers for configuration changes + // - Validates cluster capabilities (cert-manager, ESO, etc.) + Start(ctx context.Context) error + + // Stop gracefully shuts down the SDK. + // Call this on controller shutdown. + // + // Stop ensures: + // - All watches are stopped + // - Connections are closed + // - Background goroutines are terminated + Stop() error + + // GetClient returns the Kubernetes client used by the SDK. 
+ GetClient() client.Client + + // GetLogger returns the logger configured for the SDK. + GetLogger() logr.Logger + + // GetEventRecorder returns the event recorder for emitting Kubernetes events. + GetEventRecorder() record.EventRecorder +} + +// RuntimeOption configures the Runtime during creation. +type RuntimeOption func(*RuntimeConfig) + +// RuntimeConfig holds configuration for the Runtime. +type RuntimeConfig struct { + // ClusterScoped indicates if the operator watches all namespaces. + // If false, the operator is namespace-scoped. + ClusterScoped bool + + // Namespace is the namespace the operator is scoped to (if not cluster-scoped). + Namespace string + + // Logger for SDK operations. + Logger logr.Logger + + // EventRecorder for emitting Kubernetes events. + // Used to record state changes like certificate provisioning, secret rotation, etc. + EventRecorder record.EventRecorder + + // DryRun mode for testing (no actual changes to cluster). + DryRun bool + + // EnableMetrics enables Prometheus metrics for SDK operations. + EnableMetrics bool + + // MetricsPort is the port for Prometheus metrics (default: 8383). + MetricsPort int +} + +// WithClusterScoped configures the Runtime for cluster-scoped operation. +// The operator can watch and manage resources across all namespaces. +func WithClusterScoped() RuntimeOption { + return func(cfg *RuntimeConfig) { + cfg.ClusterScoped = true + } +} + +// WithNamespace configures the Runtime for namespace-scoped operation. +// The operator can only watch and manage resources in the specified namespace. +func WithNamespace(namespace string) RuntimeOption { + return func(cfg *RuntimeConfig) { + cfg.ClusterScoped = false + cfg.Namespace = namespace + } +} + +// WithLogger sets the logger for SDK operations. +func WithLogger(logger logr.Logger) RuntimeOption { + return func(cfg *RuntimeConfig) { + cfg.Logger = logger + } +} + +// WithDryRun enables dry-run mode for testing. 
+// In dry-run mode, the SDK doesn't make actual changes to the cluster. +func WithDryRun(dryRun bool) RuntimeOption { + return func(cfg *RuntimeConfig) { + cfg.DryRun = dryRun + } +} + +// WithMetrics enables Prometheus metrics for SDK operations. +func WithMetrics(enabled bool, port int) RuntimeOption { + return func(cfg *RuntimeConfig) { + cfg.EnableMetrics = enabled + cfg.MetricsPort = port + } +} + +// WithEventRecorder sets the event recorder for emitting Kubernetes events. +// Events are used to record state changes visible via kubectl describe. +// +// Usage: +// +// recorder := mgr.GetEventRecorderFor("splunk-operator") +// runtime, err := sdk.NewRuntime(mgr.GetClient(), +// sdk.WithEventRecorder(recorder), +// ) +func WithEventRecorder(recorder record.EventRecorder) RuntimeOption { + return func(cfg *RuntimeConfig) { + cfg.EventRecorder = recorder + } +} diff --git a/pkg/platform-sdk/api/secret/types.go b/pkg/platform-sdk/api/secret/types.go new file mode 100644 index 000000000..660fea6f2 --- /dev/null +++ b/pkg/platform-sdk/api/secret/types.go @@ -0,0 +1,266 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package secret provides types for secret resolution. +package secret + +// Binding describes what secret you need. +// +// The SDK validates that a Kubernetes secret exists with the required keys. 
+// The SDK does NOT create ExternalSecret CRs - that's the admin's job via kubectl/GitOps. +// +// Example (generic secret): +// +// secret, err := rctx.ResolveSecret(secret.Binding{ +// Name: "postgres-credentials", +// Type: secret.SecretTypeGeneric, +// Keys: []string{"username", "password"}, +// }) +// +// Example (Splunk secret with versioning): +// +// secret, err := rctx.ResolveSecret(secret.Binding{ +// Name: "splunk-standalone", +// Type: secret.SecretTypeSplunk, +// Keys: []string{ +// "password", +// "hec_token", +// "pass4SymmKey", +// }, +// }) +type Binding struct { + // Name of the Kubernetes secret. + // Required. + Name string + + // Namespace where the secret is located. + // If empty, defaults to the namespace from ReconcileContext. + Namespace string + + // Type of secret (affects validation and versioning behavior). + // Required. + Type SecretType + + // Keys that must exist in the secret. + // Required. Must have at least one key. + // + // The SDK validates that all keys are present in the secret.Data. + Keys []string + + // Service is the service type for pattern-based naming (CSI). + // Examples: "standalone", "clustermanager", "searchhead" + Service string + + // Instance is the instance name for pattern-based naming (CSI). + // Examples: "my-splunk", "cm", "sh1" + Instance string + + // CSI configuration for CSI-based secret mounting. + // If set, uses CSI driver instead of Kubernetes Secret objects. + CSI *CSIConfig + + // OwnerName is the name of the resource that owns this secret (for cleanup). + // Automatically set by ReconcileContext. + OwnerName string + + // CRSpec is the secret spec from the CR (for hierarchy resolution). + // Automatically set by ReconcileContext if the CR has a secret spec. + CRSpec *Spec +} + +// CSIConfig specifies CSI-based secret mounting configuration. +type CSIConfig struct { + // Provider is the CSI provider to use. 
+ // Values: "vault", "aws", "azure", "gcp" + Provider string + + // ProviderClass is the name of the SecretProviderClass CRD. + // If not specified, SDK constructs from pattern. + ProviderClass string + + // MountPath is where to mount secrets in the pod. + // Default: /mnt/secrets/ + MountPath string + + // SyncToKubernetesSecret creates a K8s Secret for compatibility. + // Default: false (secrets only mounted via CSI) + SyncToKubernetesSecret bool +} + +// SecretType indicates what kind of secret this is. +type SecretType string + +const ( + // SecretTypeGeneric is a generic key-value secret. + // Use this for database credentials, API tokens, etc. + // + // The SDK validates the secret exists and has the required keys. + // No versioning is applied. + SecretTypeGeneric SecretType = "generic" + + // SecretTypeSplunk is a Splunk-specific secret with versioning support. + // Use this for Splunk secrets (admin password, HEC token, cluster keys, etc.). + // + // The SDK: + // - Creates versioned secrets (splunk-standalone-secret-v1, v2, v3) + // - Manages rotation when the source secret changes + // - Keeps last 3 versions for rollback + // - Includes default.yml with TLS and token configuration + SecretTypeSplunk SecretType = "splunk" + + // SecretTypeTLS is a TLS secret (same as kubernetes.io/tls). + // Use this when you need to reference an existing TLS secret. + // + // The SDK validates the secret exists and has: tls.crt, tls.key, ca.crt + SecretTypeTLS SecretType = "tls" + + // SecretTypeOpaque is an opaque secret (same as Opaque type). + // Use this when you need to reference an existing opaque secret. + SecretTypeOpaque SecretType = "opaque" +) + +// Spec is the secret specification from a CR. +// Feature controllers can include this in their CRs to allow users +// to override secret settings. +type Spec struct { + // SecretRef references an existing Kubernetes secret. 
+ SecretRef *SecretRef + + // Generate indicates whether to generate a fallback secret if not found. + // Only applicable for development environments. + Generate bool +} + +// SecretRef references a Kubernetes secret. +type SecretRef struct { + // Name of the secret. + Name string + + // Namespace of the secret (optional, defaults to resource namespace). + Namespace string +} + +// Ref is what you get back from ResolveSecret - a reference to the secret. +// +// The Ref tells you: +// - Where the secret is (SecretName, Namespace) +// - Whether it's ready to use (Ready) +// - What keys it contains (Keys) +// - How it was created (Provider: "external-secrets", "kubernetes", or "generated") +// - Version (for Splunk secrets) +// - Any error if it's not ready (Error) +// +// Example usage: +// +// secret, err := rctx.ResolveSecret(binding) +// if err != nil { +// return ctrl.Result{}, err +// } +// +// if !secret.Ready { +// log.Info("Secret not ready", "error", secret.Error) +// return ctrl.Result{RequeueAfter: 10 * time.Second}, nil +// } +// +// // Use secret.SecretName in your StatefulSet +// sts := rctx.BuildStatefulSet(). +// WithSecret(secret). +// Build() +type Ref struct { + // SecretName is the Kubernetes secret name. + SecretName string + + // Namespace where the secret is located. + Namespace string + + // Keys that exist in the secret. + // This is the intersection of requested keys and actual keys in the secret. + Keys []string + + // Ready indicates if the secret exists and has all required keys. + // + // Ready=true means the secret exists and contains all requested keys. + // Ready=false means: + // - The secret doesn't exist (admin needs to create ExternalSecret or manual secret) + // - The secret exists but is missing required keys + // - ExternalSecret exists but ESO hasn't synced yet + // + // When Ready=false, requeue after 10 seconds to check again. + Ready bool + + // Provider indicates where this secret came from. 
+ // - "external-secrets": Synced by External Secrets Operator from Vault/AWS/Azure/GCP + // - "kubernetes": Created manually by admin via kubectl + // - "generated": Generated by SDK as fallback (development only) + Provider string + + // Version for Splunk secrets (e.g., v1, v2, v3). + // Only set for SecretTypeSplunk. + Version *int + + // Error contains any error message if Ready is false. + // Examples: + // - "secret not found" + // - "missing required keys: password" + // - "ExternalSecret status not synced" + Error string + + // SourceSecretName is the source secret for versioned Splunk secrets. + // Example: "splunk-default-secret" is the source for "splunk-standalone-secret-v1" + // Only set for SecretTypeSplunk. + SourceSecretName string + + // CSI contains CSI mounting information if using CSI secrets. + // Only set when Provider starts with "csi-". + CSI *CSIInfo +} + +// CSIInfo provides CSI mounting information for secrets. +type CSIInfo struct { + // ProviderClass is the name of the SecretProviderClass CRD. + ProviderClass string + + // Driver is the CSI driver name. + // Usually: "secrets-store.csi.k8s.io" + Driver string + + // MountPath is where secrets will be mounted in the pod. + // Example: /mnt/secrets/my-app-secrets + MountPath string + + // Files are the expected files that will be available after mounting. + // These correspond to the Keys from the Binding. + // Example: ["username", "password"] + Files []string +} + +// HasKey returns true if the secret has the specified key. +func (r *Ref) HasKey(key string) bool { + for _, k := range r.Keys { + if k == key { + return true + } + } + return false +} + +// HasAllKeys returns true if the secret has all the specified keys. 
+func (r *Ref) HasAllKeys(keys []string) bool { + for _, key := range keys { + if !r.HasKey(key) { + return false + } + } + return true +} diff --git a/pkg/platform-sdk/examples/basic/main.go b/pkg/platform-sdk/examples/basic/main.go new file mode 100644 index 000000000..17241a633 --- /dev/null +++ b/pkg/platform-sdk/examples/basic/main.go @@ -0,0 +1,308 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. + +// Package main demonstrates basic Platform SDK usage for a Splunk Standalone controller. +// +// This example shows: +// - Setting up the SDK runtime +// - Using ReconcileContext in reconcile loop +// - Certificate and secret resolution +// - Building Kubernetes resources with fluent API +// - Event recording and structured logging +package main + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + sdk "github.com/splunk/splunk-operator/pkg/platform-sdk" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/certificate" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/secret" +) + +// StandaloneReconciler reconciles a Splunk Standalone instance. +type StandaloneReconciler struct { + client.Client + Scheme *runtime.Scheme + sdkRuntime api.Runtime +} + +// Standalone is a simplified CRD for this example. 
+type Standalone struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec StandaloneSpec `json:"spec,omitempty"` +} + +type StandaloneSpec struct { + Replicas int32 `json:"replicas"` + Image string `json:"image"` +} + +func (s *Standalone) DeepCopyObject() runtime.Object { + return &Standalone{ + TypeMeta: s.TypeMeta, + ObjectMeta: *s.ObjectMeta.DeepCopy(), + Spec: s.Spec, + } +} + +// SetupWithManager sets up the controller with the Manager. +func (r *StandaloneReconciler) SetupWithManager(mgr ctrl.Manager) error { + logger := log.FromContext(context.Background()) + + // Create event recorder + recorder := mgr.GetEventRecorderFor("splunk-operator") + + // Create SDK runtime + sdkRuntime, err := sdk.NewRuntime( + mgr.GetClient(), + sdk.WithClusterScoped(), + sdk.WithLogger(logger), + sdk.WithEventRecorder(recorder), + ) + if err != nil { + return fmt.Errorf("failed to create SDK runtime: %w", err) + } + + // Start the SDK (initializes services, starts watchers) + if err := sdkRuntime.Start(context.Background()); err != nil { + return fmt.Errorf("failed to start SDK runtime: %w", err) + } + + r.sdkRuntime = sdkRuntime + + logger.Info("Platform SDK initialized successfully") + + return ctrl.NewControllerManagedBy(mgr). + For(&Standalone{}). + Complete(r) +} + +// Reconcile implements the reconciliation logic. 
+func (r *StandaloneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + // Create SDK ReconcileContext for this reconciliation + rctx := r.sdkRuntime.NewReconcileContext(ctx, req.Namespace, req.Name) + + rctx.Logger().Info("Starting reconciliation") + + // Fetch the Standalone CR + standalone := &Standalone{} + if err := r.Get(ctx, req.NamespacedName, standalone); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Step 1: Resolve TLS certificate + rctx.Logger().V(1).Info("Resolving TLS certificate") + + duration := 90 * 24 * time.Hour // 90 days + cert, err := rctx.ResolveCertificate(certificate.Binding{ + Name: fmt.Sprintf("%s-tls", standalone.Name), + DNSNames: []string{ + fmt.Sprintf("%s.%s.svc", standalone.Name, standalone.Namespace), + fmt.Sprintf("%s.%s.svc.cluster.local", standalone.Name, standalone.Namespace), + }, + Duration: &duration, + }) + if err != nil { + rctx.Logger().Error(err, "Failed to resolve certificate") + return ctrl.Result{}, err + } + + if !cert.Ready { + rctx.Logger().Info("Certificate not ready, requeueing", + "provider", cert.Provider, + "secretName", cert.SecretName) + + // Record event about certificate provisioning + rctx.EventRecorder().Event(standalone, corev1.EventTypeNormal, + "CertificateProvisioning", + fmt.Sprintf("Waiting for certificate %s from %s", cert.SecretName, cert.Provider)) + + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + + rctx.Logger().Info("Certificate ready", + "provider", cert.Provider, + "secretName", cert.SecretName) + + // Record event about certificate being ready + rctx.EventRecorder().Event(standalone, corev1.EventTypeNormal, + api.EventReasonCertificateReady, + fmt.Sprintf("Certificate %s is ready", cert.SecretName)) + + // Step 2: Resolve Splunk credentials secret + rctx.Logger().V(1).Info("Resolving Splunk credentials") + + secretRef, err := rctx.ResolveSecret(secret.Binding{ + Name: fmt.Sprintf("%s-credentials", 
standalone.Name),
+		Type: secret.SecretTypeSplunk,
+		Keys: []string{"password", "hec_token"},
+	})
+	if err != nil {
+		rctx.Logger().Error(err, "Failed to resolve secret")
+		return ctrl.Result{}, err
+	}
+
+	if !secretRef.Ready {
+		rctx.Logger().Info("Secret not ready", "secretName", secretRef.SecretName)
+		return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
+	}
+
+	if secretRef.Version != nil {
+		rctx.Logger().V(1).Info("Using versioned secret",
+			"version", *secretRef.Version,
+			"secretName", secretRef.SecretName)
+	}
+
+	// Step 3: Build ConfigMap with Splunk configuration
+	rctx.Logger().V(1).Info("Building ConfigMap")
+
+	configMap, err := rctx.BuildConfigMap().
+		WithName(fmt.Sprintf("%s-config", standalone.Name)).
+		WithData(map[string]string{
+			"default.yml": `
+splunk:
+  hec:
+    enable: true
+    port: 8088
+  s2s:
+    enable: true
+    port: 9997
+`,
+		}).
+		Build()
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Apply ConfigMap
+	if err := r.Patch(ctx, configMap, client.Apply,
+		client.ForceOwnership,
+		client.FieldOwner("splunk-operator")); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Step 4: Build StatefulSet
+	rctx.Logger().V(1).Info("Building StatefulSet")
+
+	sts, err := rctx.BuildStatefulSet().
+		WithName(standalone.Name).
+		WithReplicas(standalone.Spec.Replicas).
+		WithImage(standalone.Spec.Image).
+		WithPorts([]corev1.ContainerPort{
+			{Name: "web", ContainerPort: 8000, Protocol: corev1.ProtocolTCP},
+			{Name: "mgmt", ContainerPort: 8089, Protocol: corev1.ProtocolTCP},
+			{Name: "hec", ContainerPort: 8088, Protocol: corev1.ProtocolTCP},
+			{Name: "s2s", ContainerPort: 9997, Protocol: corev1.ProtocolTCP},
+		}).
+		WithCertificate(cert). // Auto-mounts certificate at /etc/certs/{secretName}
+		WithSecret(secretRef). // Auto-creates volume for secret
+		WithConfigMap(configMap.Name).
+		WithEnv(corev1.EnvVar{
+			Name:  "SPLUNK_START_ARGS",
+			Value: "--accept-license", // NOTE(review): per this PR's README, Splunk Enterprise 10.x images additionally require SPLUNK_GENERAL_TERMS=--accept-sgt-current-at-splunk-com — confirm this example should set it too
+		}).
+ WithEnv(corev1.EnvVar{ + Name: "SPLUNK_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secretRef.SecretName, + }, + Key: "password", + }, + }, + }). + WithResources(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("8Gi"), + }, + }). + WithObservability(). // Adds Prometheus scraping annotations + Build() + if err != nil { + return ctrl.Result{}, err + } + + // Apply StatefulSet + if err := r.Patch(ctx, sts, client.Apply, + client.ForceOwnership, + client.FieldOwner("splunk-operator")); err != nil { + return ctrl.Result{}, err + } + + rctx.Logger().Info("StatefulSet applied", "name", sts.Name) + + // Step 5: Build Service + rctx.Logger().V(1).Info("Building Service") + + service, err := rctx.BuildService(). + WithName(standalone.Name). + WithType(corev1.ServiceTypeClusterIP). + WithPorts([]corev1.ServicePort{ + { + Name: "web", + Port: 8000, + TargetPort: intstr.FromInt(8000), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "mgmt", + Port: 8089, + TargetPort: intstr.FromInt(8089), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "hec", + Port: 8088, + TargetPort: intstr.FromInt(8088), + Protocol: corev1.ProtocolTCP, + }, + }). + WithDiscoveryLabels(). 
// Enables service discovery + Build() + if err != nil { + return ctrl.Result{}, err + } + + // Apply Service + if err := r.Patch(ctx, service, client.Apply, + client.ForceOwnership, + client.FieldOwner("splunk-operator")); err != nil { + return ctrl.Result{}, err + } + + rctx.Logger().Info("Service applied", "name", service.Name) + + // Record successful reconciliation event + rctx.EventRecorder().Event(standalone, corev1.EventTypeNormal, + "ReconciliationComplete", + "Splunk Standalone has been successfully reconciled") + + rctx.Logger().Info("Reconciliation complete") + + return ctrl.Result{}, nil +} + +func main() { + // This is just an example showing SDK usage patterns. + // In a real controller, you would call SetupWithManager from main.go + fmt.Println("This is an example showing Platform SDK usage") + fmt.Println("See the code for complete patterns and best practices") +} diff --git a/pkg/platform-sdk/examples/service-discovery.go b/pkg/platform-sdk/examples/service-discovery.go new file mode 100644 index 000000000..474ff5963 --- /dev/null +++ b/pkg/platform-sdk/examples/service-discovery.go @@ -0,0 +1,225 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. + +// Package examples demonstrates Platform SDK service discovery patterns. +// +// This example shows: +// - Discovering Splunk instances by type +// - Finding indexer clusters for search head configuration +// - Checking service health and readiness +// - Using discovery in controller logic +package examples + +import ( + "context" + "fmt" + + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/discovery" +) + +// DiscoverIndexersExample shows how to find indexer clusters for a search head. 
+func DiscoverIndexersExample(rctx api.ReconcileContext, searchHeadNamespace string) error { + logger := rctx.Logger() + + logger.Info("Discovering indexer clusters for search head configuration") + + // Discover all indexer clusters in the same namespace + indexers, err := rctx.DiscoverSplunk(discovery.SplunkSelector{ + Type: discovery.SplunkTypeIndexerCluster, + Namespace: searchHeadNamespace, + }) + if err != nil { + return fmt.Errorf("failed to discover indexers: %w", err) + } + + if len(indexers) == 0 { + logger.Info("No indexer clusters found, search head will run standalone") + return nil + } + + logger.Info("Found indexer clusters", "count", len(indexers)) + + // Process each indexer cluster + for _, indexer := range indexers { + if indexer.Health == nil || !indexer.Health.Healthy { + logger.Info("Indexer not healthy, skipping", + "name", indexer.Name, + "namespace", indexer.Namespace) + continue + } + + logger.Info("Configuring search head to use indexer", + "indexer", indexer.Name, + "url", indexer.URL, + "type", indexer.Type) + + // In real controller, you would: + // 1. Get cluster manager endpoint from indexer.URL + // 2. Configure search head to use this indexer cluster + // 3. Update search head configmap with peer nodes + } + + return nil +} + +// DiscoverLicenseManagerExample shows how to find a license manager. 
+func DiscoverLicenseManagerExample(rctx api.ReconcileContext, namespace string) (*discovery.SplunkEndpoint, error) { + logger := rctx.Logger() + + logger.Info("Discovering license manager") + + // Find license managers + licenseMgrs, err := rctx.DiscoverSplunk(discovery.SplunkSelector{ + Type: discovery.SplunkTypeLicenseManager, + Namespace: namespace, + }) + if err != nil { + return nil, fmt.Errorf("failed to discover license manager: %w", err) + } + + if len(licenseMgrs) == 0 { + logger.Info("No license manager found, using local licensing") + return nil, nil + } + + // Use the first healthy license manager + for _, lm := range licenseMgrs { + if lm.Health != nil && lm.Health.Healthy { + logger.Info("Found license manager", + "name", lm.Name, + "url", lm.URL) + return &lm, nil + } + } + + logger.Info("No healthy license manager found") + return nil, nil +} + +// DiscoverClusterManagerExample shows how to find a cluster manager for an indexer. +func DiscoverClusterManagerExample(rctx api.ReconcileContext, indexerNamespace string) (*discovery.SplunkEndpoint, error) { + logger := rctx.Logger() + + logger.V(1).Info("Discovering cluster manager for indexer") + + // Find cluster managers in the same namespace + clusterMgrs, err := rctx.DiscoverSplunk(discovery.SplunkSelector{ + Type: discovery.SplunkTypeClusterManager, + Namespace: indexerNamespace, + Labels: map[string]string{ + "app.kubernetes.io/part-of": "splunk-enterprise", + }, + }) + if err != nil { + return nil, fmt.Errorf("failed to discover cluster manager: %w", err) + } + + if len(clusterMgrs) == 0 { + return nil, fmt.Errorf("no cluster manager found for indexer cluster") + } + + // Verify the cluster manager is ready + cm := &clusterMgrs[0] + if cm.Health == nil || !cm.Health.Healthy { + return nil, fmt.Errorf("cluster manager %s is not healthy", cm.Name) + } + + logger.Info("Found cluster manager", + "name", cm.Name, + "url", cm.URL) + + return cm, nil +} + +// DiscoverGenericServicesExample shows how to 
discover non-Splunk services. +func DiscoverGenericServicesExample(rctx api.ReconcileContext) error { + logger := rctx.Logger() + + logger.V(1).Info("Discovering external services") + + // Find Kafka services for data ingestion + kafkaServices, err := rctx.Discover(discovery.Selector{ + Labels: map[string]string{ + "app": "kafka", + }, + Namespace: "data-platform", + }) + if err != nil { + return fmt.Errorf("failed to discover Kafka services: %w", err) + } + + logger.Info("Found Kafka services", "count", len(kafkaServices)) + + for _, svc := range kafkaServices { + logger.V(1).Info("Kafka service discovered", + "name", svc.Name, + "url", svc.URL, + "namespace", svc.Namespace) + + // Configure Splunk to ingest from this Kafka instance + } + + return nil +} + +// CrossNamespaceDiscoveryExample shows discovering resources across namespaces. +func CrossNamespaceDiscoveryExample(rctx api.ReconcileContext) error { + logger := rctx.Logger() + + logger.Info("Discovering all Splunk instances across cluster") + + // Discover all standalones (cluster-wide) + standalones, err := rctx.DiscoverSplunk(discovery.SplunkSelector{ + Type: discovery.SplunkTypeStandalone, + Namespace: "", // Empty = all namespaces + }) + if err != nil { + return fmt.Errorf("failed to discover standalones: %w", err) + } + + // Group by namespace + byNamespace := make(map[string][]discovery.SplunkEndpoint) + for _, standalone := range standalones { + byNamespace[standalone.Namespace] = append( + byNamespace[standalone.Namespace], + standalone, + ) + } + + for ns, instances := range byNamespace { + logger.Info("Standalones in namespace", + "namespace", ns, + "count", len(instances)) + } + + return nil +} + +// Example usage in a real reconciler: +func ReconcileSearchHeadWithDiscovery(ctx context.Context, rctx api.ReconcileContext) error { + logger := rctx.Logger() + + // Step 1: Find indexer clusters + if err := DiscoverIndexersExample(rctx, rctx.Namespace()); err != nil { + return err + } + + // Step 2: 
Find license manager + licenseMgr, err := DiscoverLicenseManagerExample(rctx, rctx.Namespace()) + if err != nil { + return err + } + + if licenseMgr != nil { + logger.Info("Will configure search head to use license manager", + "licenseManagerURL", licenseMgr.URL) + + // Configure search head with license manager URL + // This would be done via ConfigMap or environment variables + } + + // Step 3: Build search head configuration based on discoveries + // ... (continue with StatefulSet building using discovered endpoints) + + return nil +} diff --git a/pkg/platform-sdk/integration_test.go b/pkg/platform-sdk/integration_test.go new file mode 100644 index 000000000..67619f054 --- /dev/null +++ b/pkg/platform-sdk/integration_test.go @@ -0,0 +1,519 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. + +package sdk_test + +import ( + "context" + "testing" + "time" + + "github.com/go-logr/logr" + platformv4 "github.com/splunk/splunk-operator/api/platform/v4" + sdk "github.com/splunk/splunk-operator/pkg/platform-sdk" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/certificate" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/config" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/discovery" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/secret" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +// TestSDKIntegration_BasicWorkflow tests the complete SDK workflow. +func TestSDKIntegration_BasicWorkflow(t *testing.T) { + // Create fake client with scheme + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + _ = config.AddToScheme(scheme) + _ = platformv4.AddToScheme(scheme) + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). 
+ Build() + + // Create event recorder + eventRecorder := record.NewFakeRecorder(100) + + // Create SDK runtime + runtime, err := sdk.NewRuntime( + fakeClient, + sdk.WithClusterScoped(), + sdk.WithLogger(logr.Discard()), + sdk.WithEventRecorder(eventRecorder), + ) + if err != nil { + t.Fatalf("NewRuntime() failed: %v", err) + } + + // Start the runtime + ctx := context.Background() + if err := runtime.Start(ctx); err != nil { + t.Fatalf("Start() failed: %v", err) + } + + // Create reconcile context + rctx := runtime.NewReconcileContext(ctx, "default", "test-standalone") + + // Verify context properties + if rctx.Namespace() != "default" { + t.Errorf("Namespace() = %v, want default", rctx.Namespace()) + } + if rctx.Name() != "test-standalone" { + t.Errorf("Name() = %v, want test-standalone", rctx.Name()) + } + if rctx.Context() != ctx { + t.Error("Context() should return original context") + } + // Logger is a zero-value struct if nil, so just verify we can call it + _ = rctx.Logger() + if rctx.EventRecorder() == nil { + t.Error("EventRecorder() should not be nil") + } + + // Test certificate resolution (self-signed fallback) + certRef, err := rctx.ResolveCertificate(certificate.Binding{ + Name: "test-tls", + DNSNames: []string{ + "test.default.svc", + "test.default.svc.cluster.local", + }, + }) + if err != nil { + t.Fatalf("ResolveCertificate() failed: %v", err) + } + + // Self-signed should be ready immediately + if !certRef.Ready { + t.Error("Self-signed certificate should be ready immediately") + } + if certRef.Provider != "self-signed" { + t.Errorf("Provider = %v, want self-signed", certRef.Provider) + } + if certRef.SecretName != "test-tls" { + t.Errorf("SecretName = %v, want test-tls", certRef.SecretName) + } + + // Verify certificate secret was created + certSecret := &corev1.Secret{} + err = fakeClient.Get(ctx, client.ObjectKey{ + Name: "test-tls", + Namespace: "default", + }, certSecret) + if err != nil { + t.Errorf("Certificate secret not created: %v", 
err) + } + + // Test secret resolution + // First, create a source secret + sourceSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-default-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "password": []byte("admin123"), + "hec_token": []byte("token123"), + }, + } + if err := fakeClient.Create(ctx, sourceSecret); err != nil { + t.Fatalf("Failed to create source secret: %v", err) + } + + secretRef, err := rctx.ResolveSecret(secret.Binding{ + Name: "test-credentials", + Type: secret.SecretTypeSplunk, + Keys: []string{"password", "hec_token"}, + }) + if err != nil { + t.Fatalf("ResolveSecret() failed: %v", err) + } + + if !secretRef.Ready { + t.Error("Secret should be ready") + } + if secretRef.Version == nil { + t.Error("Versioned secret should have version") + } else if *secretRef.Version != 1 { + t.Errorf("First version should be 1, got %v", *secretRef.Version) + } + + // Test service discovery + // Create a test service + testService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-indexer", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/managed-by": "splunk-operator", + "app.kubernetes.io/component": "IndexerCluster", + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []corev1.ServicePort{ + {Name: "mgmt", Port: 8089}, + }, + }, + } + if err := fakeClient.Create(ctx, testService); err != nil { + t.Fatalf("Failed to create test service: %v", err) + } + + endpoints, err := rctx.DiscoverSplunk(discovery.SplunkSelector{ + Type: discovery.SplunkTypeIndexerCluster, + Namespace: "default", + }) + if err != nil { + t.Fatalf("DiscoverSplunk() failed: %v", err) + } + + if len(endpoints) != 1 { + t.Errorf("Expected 1 endpoint, got %v", len(endpoints)) + } + if len(endpoints) > 0 { + if endpoints[0].Name != "test-indexer" { + t.Errorf("Endpoint name = %v, want test-indexer", endpoints[0].Name) + } + if endpoints[0].Type != discovery.SplunkTypeIndexerCluster { + 
t.Errorf("Endpoint type = %v, want IndexerCluster", endpoints[0].Type) + } + } + + // Test builders + sts, err := rctx.BuildStatefulSet(). + WithName("test-standalone"). + WithImage("splunk/splunk:9.1.0"). + WithReplicas(3). + WithPorts([]corev1.ContainerPort{ + {Name: "web", ContainerPort: 8000}, + {Name: "mgmt", ContainerPort: 8089}, + }). + WithCertificate(certRef). + WithSecret(secretRef). + Build() + if err != nil { + t.Fatalf("BuildStatefulSet() failed: %v", err) + } + + if sts.Name != "test-standalone" { + t.Errorf("StatefulSet name = %v, want test-standalone", sts.Name) + } + if *sts.Spec.Replicas != 3 { + t.Errorf("Replicas = %v, want 3", *sts.Spec.Replicas) + } + + // Verify certificate and secret volumes were created + foundCertVol := false + foundSecretVol := false + for _, vol := range sts.Spec.Template.Spec.Volumes { + if vol.Name == certRef.SecretName { + foundCertVol = true + } + if vol.Name == secretRef.SecretName { + foundSecretVol = true + } + } + if !foundCertVol { + t.Error("Certificate volume not created") + } + if !foundSecretVol { + t.Error("Secret volume not created") + } + + // Test service builder + svc, err := rctx.BuildService(). + WithName("test-standalone"). + WithType(corev1.ServiceTypeClusterIP). + WithPorts([]corev1.ServicePort{ + {Name: "web", Port: 8000, TargetPort: intstr.FromInt(8000)}, + {Name: "mgmt", Port: 8089, TargetPort: intstr.FromInt(8089)}, + }). + WithDiscoveryLabels(). + Build() + if err != nil { + t.Fatalf("BuildService() failed: %v", err) + } + + if svc.Labels["splunk.com/discoverable"] != "true" { + t.Error("Discovery label not set") + } + + // Test configmap builder + cm, err := rctx.BuildConfigMap(). + WithName("test-config"). + WithData(map[string]string{ + "server.conf": "[general]\nserverName = test", + }). 
+	Build()
+	if err != nil {
+		t.Fatalf("BuildConfigMap() failed: %v", err)
+	}
+
+	if cm.Data["server.conf"] == "" {
+		t.Error("ConfigMap data not set")
+	}
+
+	// Stop the runtime
+	if err := runtime.Stop(); err != nil {
+		t.Errorf("Stop() failed: %v", err)
+	}
+}
+
+// TestSDKIntegration_ConfigurationHierarchy tests configuration resolution.
+func TestSDKIntegration_ConfigurationHierarchy(t *testing.T) {
+	scheme := runtime.NewScheme()
+	_ = corev1.AddToScheme(scheme)
+	_ = config.AddToScheme(scheme)
+	_ = platformv4.AddToScheme(scheme)
+
+	// Create PlatformConfig
+	duration := metav1.Duration{Duration: 90 * 24 * time.Hour} // 90 days; time.Duration is nanoseconds, so the unit must be explicit
+	platformConfig := &config.PlatformConfig{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "platform-default",
+		},
+		Spec: config.PlatformConfigSpec{
+			Certificates: config.CertificateConfig{
+				Provider: "self-signed",
+				Duration: &duration,
+			},
+			Observability: config.ObservabilityConfig{
+				Enabled: true,
+				PrometheusAnnotations: config.PrometheusAnnotations{
+					Scrape: true,
+					Port:   9090,
+					Path:   "/metrics",
+				},
+			},
+		},
+	}
+
+	fakeClient := fake.NewClientBuilder().
+		WithScheme(scheme).
+		WithObjects(platformConfig).
+ Build() + + runtime, err := sdk.NewRuntime( + fakeClient, + sdk.WithClusterScoped(), + sdk.WithLogger(logr.Discard()), + ) + if err != nil { + t.Fatalf("NewRuntime() failed: %v", err) + } + + ctx := context.Background() + if err := runtime.Start(ctx); err != nil { + t.Fatalf("Start() failed: %v", err) + } + defer runtime.Stop() + + rctx := runtime.NewReconcileContext(ctx, "default", "test") + + // Test certificate with platform config + certRef, err := rctx.ResolveCertificate(certificate.Binding{ + Name: "test-cert", + DNSNames: []string{"test.svc"}, + }) + if err != nil { + t.Fatalf("ResolveCertificate() failed: %v", err) + } + + if certRef.Provider != "self-signed" { + t.Errorf("Provider = %v, want self-signed (from PlatformConfig)", certRef.Provider) + } +} + +// TestSDKIntegration_SecretVersioning tests secret version management. +func TestSDKIntegration_SecretVersioning(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + _ = config.AddToScheme(scheme) + _ = platformv4.AddToScheme(scheme) + + // Create source secret + sourceSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-default-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "password": []byte("initial-password"), + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(sourceSecret). 
+ Build() + + runtime, err := sdk.NewRuntime( + fakeClient, + sdk.WithClusterScoped(), + sdk.WithLogger(logr.Discard()), + ) + if err != nil { + t.Fatalf("NewRuntime() failed: %v", err) + } + + ctx := context.Background() + if err := runtime.Start(ctx); err != nil { + t.Fatalf("Start() failed: %v", err) + } + defer runtime.Stop() + + rctx := runtime.NewReconcileContext(ctx, "default", "test") + + // First resolution creates v1 + secret1, err := rctx.ResolveSecret(secret.Binding{ + Name: "test-secret", + Type: secret.SecretTypeSplunk, + Keys: []string{"password"}, + }) + if err != nil { + t.Fatalf("First ResolveSecret() failed: %v", err) + } + + if *secret1.Version != 1 { + t.Errorf("First version = %v, want 1", *secret1.Version) + } + + // Second resolution with same content returns same version + secret2, err := rctx.ResolveSecret(secret.Binding{ + Name: "test-secret", + Type: secret.SecretTypeSplunk, + Keys: []string{"password"}, + }) + if err != nil { + t.Fatalf("Second ResolveSecret() failed: %v", err) + } + + if *secret2.Version != 1 { + t.Errorf("Second version = %v, want 1 (no change)", *secret2.Version) + } + + // Update source secret + sourceSecret.Data["password"] = []byte("new-password") + if err := fakeClient.Update(ctx, sourceSecret); err != nil { + t.Fatalf("Failed to update source secret: %v", err) + } + + // Third resolution with changed content creates v2 + secret3, err := rctx.ResolveSecret(secret.Binding{ + Name: "test-secret", + Type: secret.SecretTypeSplunk, + Keys: []string{"password"}, + }) + if err != nil { + t.Fatalf("Third ResolveSecret() failed: %v", err) + } + + if *secret3.Version != 2 { + t.Errorf("Third version = %v, want 2 (changed content)", *secret3.Version) + } +} + +// TestSDKIntegration_MultipleContexts tests multiple concurrent reconcile contexts. 
+func TestSDKIntegration_MultipleContexts(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + _ = config.AddToScheme(scheme) + _ = platformv4.AddToScheme(scheme) + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + Build() + + runtime, err := sdk.NewRuntime( + fakeClient, + sdk.WithClusterScoped(), + sdk.WithLogger(logr.Discard()), + ) + if err != nil { + t.Fatalf("NewRuntime() failed: %v", err) + } + + ctx := context.Background() + if err := runtime.Start(ctx); err != nil { + t.Fatalf("Start() failed: %v", err) + } + defer runtime.Stop() + + // Create multiple contexts + rctx1 := runtime.NewReconcileContext(ctx, "ns1", "resource1") + rctx2 := runtime.NewReconcileContext(ctx, "ns2", "resource2") + rctx3 := runtime.NewReconcileContext(ctx, "ns1", "resource3") + + // Verify contexts are independent + if rctx1.Namespace() != "ns1" || rctx1.Name() != "resource1" { + t.Error("Context 1 has wrong namespace/name") + } + if rctx2.Namespace() != "ns2" || rctx2.Name() != "resource2" { + t.Error("Context 2 has wrong namespace/name") + } + if rctx3.Namespace() != "ns1" || rctx3.Name() != "resource3" { + t.Error("Context 3 has wrong namespace/name") + } + + // All contexts should share the same runtime + if rctx1.Context() != ctx || rctx2.Context() != ctx || rctx3.Context() != ctx { + t.Error("Contexts should share the same Go context") + } +} + +// TestSDKIntegration_EventRecording tests event emission. +func TestSDKIntegration_EventRecording(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + _ = config.AddToScheme(scheme) + _ = platformv4.AddToScheme(scheme) + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). 
+ Build() + + eventRecorder := record.NewFakeRecorder(10) + + runtime, err := sdk.NewRuntime( + fakeClient, + sdk.WithClusterScoped(), + sdk.WithLogger(logr.Discard()), + sdk.WithEventRecorder(eventRecorder), + ) + if err != nil { + t.Fatalf("NewRuntime() failed: %v", err) + } + + ctx := context.Background() + if err := runtime.Start(ctx); err != nil { + t.Fatalf("Start() failed: %v", err) + } + defer runtime.Stop() + + rctx := runtime.NewReconcileContext(ctx, "default", "test") + + // Create a dummy object + obj := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + // Record an event + rctx.EventRecorder().Event(obj, corev1.EventTypeNormal, "TestReason", "Test message") + + // Verify event was recorded + select { + case event := <-eventRecorder.Events: + if event != "Normal TestReason Test message" { + t.Errorf("Event = %v, want 'Normal TestReason Test message'", event) + } + case <-time.After(1 * time.Second): + t.Error("Event was not recorded") + } +} diff --git a/pkg/platform-sdk/internal/context.go b/pkg/platform-sdk/internal/context.go new file mode 100644 index 000000000..c3d671224 --- /dev/null +++ b/pkg/platform-sdk/internal/context.go @@ -0,0 +1,176 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/builders" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/certificate" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/discovery" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/secret" + "k8s.io/client-go/tools/record" +) + +// reconcileContext implements the api.ReconcileContext interface. +// +// ReconcileContext is a lightweight wrapper that: +// - Knows about the specific resource being reconciled (namespace, name) +// - Provides a logger with resource context +// - Delegates to the registry's services for actual work +// - Provides a clean, convenient API for controllers +type reconcileContext struct { + ctx context.Context + namespace string + name string + runtime *runtime + registry *registry + logger logr.Logger +} + +// Context information + +func (rc *reconcileContext) Namespace() string { + return rc.namespace +} + +func (rc *reconcileContext) Name() string { + return rc.name +} + +func (rc *reconcileContext) Context() context.Context { + return rc.ctx +} + +func (rc *reconcileContext) Logger() logr.Logger { + return rc.logger +} + +func (rc *reconcileContext) EventRecorder() record.EventRecorder { + return rc.runtime.GetEventRecorder() +} + +// Certificate resolution + +func (rc *reconcileContext) ResolveCertificate(binding certificate.Binding) (*certificate.Ref, error) { + // Fill in context if not provided + if binding.Namespace == "" { + binding.Namespace = rc.namespace + } + if binding.OwnerName == "" { + binding.OwnerName = rc.name + } + + // Get the certificate resolver from the registry + resolver := rc.registry.CertificateResolver() + if resolver == nil { + return nil, fmt.Errorf("certificate resolver not available") + } + + // Delegate to the resolver + return resolver.Resolve(rc.ctx, binding) +} + +// Secret resolution + +func (rc *reconcileContext) ResolveSecret(binding 
secret.Binding) (*secret.Ref, error) { + // Fill in context if not provided + if binding.Namespace == "" { + binding.Namespace = rc.namespace + } + if binding.OwnerName == "" { + binding.OwnerName = rc.name + } + + // Get the secret resolver from the registry + resolver := rc.registry.SecretResolver() + if resolver == nil { + return nil, fmt.Errorf("secret resolver not available") + } + + // Delegate to the resolver + return resolver.Resolve(rc.ctx, binding) +} + +// Service discovery + +func (rc *reconcileContext) DiscoverSplunk(selector discovery.SplunkSelector) ([]discovery.SplunkEndpoint, error) { + // Fill in context if not provided + if selector.Namespace == "" && !rc.runtime.config.ClusterScoped { + selector.Namespace = rc.namespace + } + + // Get the discovery service from the registry + discoveryService := rc.registry.DiscoveryService() + if discoveryService == nil { + return nil, fmt.Errorf("discovery service not available") + } + + // Delegate to the discovery service + return discoveryService.DiscoverSplunk(rc.ctx, selector) +} + +func (rc *reconcileContext) Discover(selector discovery.Selector) ([]discovery.Endpoint, error) { + // Fill in context if not provided + if selector.Namespace == "" && !rc.runtime.config.ClusterScoped { + selector.Namespace = rc.namespace + } + + // Get the discovery service from the registry + discoveryService := rc.registry.DiscoveryService() + if discoveryService == nil { + return nil, fmt.Errorf("discovery service not available") + } + + // Delegate to the discovery service + return discoveryService.Discover(rc.ctx, selector) +} + +// Configuration resolution + +func (rc *reconcileContext) ResolveConfig(key string) (interface{}, error) { + // Get the config resolver from the registry + resolver := rc.registry.ConfigResolver() + if resolver == nil { + return nil, fmt.Errorf("config resolver not available") + } + + // Delegate to the resolver + return resolver.ResolveConfig(rc.ctx, key, rc.namespace) +} + +// Resource 
builders + +func (rc *reconcileContext) BuildStatefulSet() builders.StatefulSetBuilder { + return rc.registry.StatefulSetBuilder(rc.namespace, rc.name) +} + +func (rc *reconcileContext) BuildService() builders.ServiceBuilder { + return rc.registry.ServiceBuilder(rc.namespace, rc.name) +} + +func (rc *reconcileContext) BuildConfigMap() builders.ConfigMapBuilder { + return rc.registry.ConfigMapBuilder(rc.namespace, rc.name) +} + +func (rc *reconcileContext) BuildPod() builders.PodBuilder { + return rc.registry.PodBuilder(rc.namespace, rc.name) +} + +func (rc *reconcileContext) BuildDeployment() builders.DeploymentBuilder { + return rc.registry.DeploymentBuilder(rc.namespace, rc.name) +} diff --git a/pkg/platform-sdk/internal/registry.go b/pkg/platform-sdk/internal/registry.go new file mode 100644 index 000000000..813443d92 --- /dev/null +++ b/pkg/platform-sdk/internal/registry.go @@ -0,0 +1,289 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal + +import ( + "context" + "fmt" + "sync" + + "github.com/go-logr/logr" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/builders" + "github.com/splunk/splunk-operator/pkg/platform-sdk/services" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// registry is the service locator that creates, caches, and provides +// access to all SDK services. +// +// The registry uses lazy initialization with sync.Once to ensure each +// service is created exactly once and cached for reuse. +// +// Services can depend on each other (e.g., CertificateResolver depends +// on ConfigResolver). The registry handles dependency injection. +type registry struct { + client client.Client + config *api.RuntimeConfig + logger logr.Logger + + // Service instances (created lazily) + configResolver services.ConfigResolver + configResolverOnce sync.Once + + certificateResolver services.CertificateResolver + certificateResolverOnce sync.Once + + secretResolver services.SecretResolver + secretResolverOnce sync.Once + + discoveryService services.DiscoveryService + discoveryServiceOnce sync.Once + + observabilityService services.ObservabilityService + observabilityServiceOnce sync.Once + + backupService services.BackupService + backupServiceOnce sync.Once + + // Provider detection results (cached) + hasCertManager bool + hasCertManagerOnce sync.Once + + hasExternalSecrets bool + hasExternalSecretsOnce sync.Once + + hasOTelCollector bool + hasOTelCollectorOnce sync.Once + + // Lifecycle + started bool + mu sync.RWMutex +} + +// newRegistry creates a new service registry. +func newRegistry(client client.Client, config *api.RuntimeConfig, logger logr.Logger) (*registry, error) { + return ®istry{ + client: client, + config: config, + logger: logger.WithName("registry"), + }, nil +} + +// Start initializes the registry and starts services. 
+func (r *registry) Start(ctx context.Context) error { + r.mu.Lock() + defer r.mu.Unlock() + + if r.started { + return fmt.Errorf("registry already started") + } + + r.logger.Info("Starting service registry") + + // Detect available providers + r.detectProviders() + + r.logger.Info("Provider detection complete", + "certManager", r.hasCertManager, + "externalSecrets", r.hasExternalSecrets, + "otelCollector", r.hasOTelCollector, + ) + + // Initialize config resolver (needed by other services) + if err := r.initConfigResolver(ctx); err != nil { + return fmt.Errorf("failed to initialize config resolver: %w", err) + } + + r.started = true + r.logger.Info("Service registry started") + + return nil +} + +// Stop gracefully shuts down the registry. +func (r *registry) Stop() error { + r.mu.Lock() + defer r.mu.Unlock() + + if !r.started { + return nil + } + + r.logger.Info("Stopping service registry") + + // Stop services (if they implement lifecycle methods) + // TODO: Add service lifecycle management + + r.started = false + r.logger.Info("Service registry stopped") + + return nil +} + +// ConfigResolver returns the config resolver service. +func (r *registry) ConfigResolver() services.ConfigResolver { + r.configResolverOnce.Do(func() { + r.logger.V(1).Info("Creating ConfigResolver") + r.configResolver = services.NewConfigResolver(r.client, r.config, r.logger) + }) + return r.configResolver +} + +// CertificateResolver returns the certificate resolver service. +func (r *registry) CertificateResolver() services.CertificateResolver { + r.certificateResolverOnce.Do(func() { + r.logger.V(1).Info("Creating CertificateResolver") + r.certificateResolver = services.NewCertificateResolver( + r.client, + r.ConfigResolver(), // Dependency injection + r.HasCertManager(), // Provider detection + r.config, + r.logger, + ) + }) + return r.certificateResolver +} + +// SecretResolver returns the secret resolver service. 
+func (r *registry) SecretResolver() services.SecretResolver { + r.secretResolverOnce.Do(func() { + r.logger.V(1).Info("Creating SecretResolver") + r.secretResolver = services.NewSecretResolver( + r.client, + r.ConfigResolver(), // Dependency injection + r.config, + r.logger, + ) + }) + return r.secretResolver +} + +// DiscoveryService returns the discovery service. +func (r *registry) DiscoveryService() services.DiscoveryService { + r.discoveryServiceOnce.Do(func() { + r.logger.V(1).Info("Creating DiscoveryService") + r.discoveryService = services.NewDiscoveryService( + r.client, + r.config, + r.logger, + ) + }) + return r.discoveryService +} + +// ObservabilityService returns the observability service. +func (r *registry) ObservabilityService() services.ObservabilityService { + r.observabilityServiceOnce.Do(func() { + r.logger.V(1).Info("Creating ObservabilityService") + r.observabilityService = services.NewObservabilityService( + r.client, + r.ConfigResolver(), // Dependency injection + r.config, + r.logger, + ) + }) + return r.observabilityService +} + +// BackupService returns the backup service. 
+func (r *registry) BackupService() services.BackupService { + r.backupServiceOnce.Do(func() { + r.logger.V(1).Info("Creating BackupService") + r.backupService = services.NewBackupService( + r.client, + r.config, + r.logger, + ) + }) + return r.backupService +} + +// Builder factories + +func (r *registry) StatefulSetBuilder(namespace, ownerName string) builders.StatefulSetBuilder { + return services.NewStatefulSetBuilder(namespace, ownerName, r.ObservabilityService()) +} + +func (r *registry) ServiceBuilder(namespace, ownerName string) builders.ServiceBuilder { + return services.NewServiceBuilder(namespace, ownerName) +} + +func (r *registry) ConfigMapBuilder(namespace, ownerName string) builders.ConfigMapBuilder { + return services.NewConfigMapBuilder(namespace, ownerName) +} + +func (r *registry) PodBuilder(namespace, ownerName string) builders.PodBuilder { + return services.NewPodBuilder(namespace, ownerName) +} + +func (r *registry) DeploymentBuilder(namespace, ownerName string) builders.DeploymentBuilder { + return services.NewDeploymentBuilder(namespace, ownerName, r.ObservabilityService()) +} + +// Provider detection + +func (r *registry) HasCertManager() bool { + r.hasCertManagerOnce.Do(func() { + r.hasCertManager = r.detectCRD("cert-manager.io", "Certificate") + }) + return r.hasCertManager +} + +func (r *registry) HasExternalSecrets() bool { + r.hasExternalSecretsOnce.Do(func() { + r.hasExternalSecrets = r.detectCRD("external-secrets.io", "ExternalSecret") + }) + return r.hasExternalSecrets +} + +func (r *registry) HasOTelCollector() bool { + r.hasOTelCollectorOnce.Do(func() { + r.hasOTelCollector = r.detectCRD("opentelemetry.io", "OpenTelemetryCollector") + }) + return r.hasOTelCollector +} + +// detectProviders runs provider detection for all providers. 
+func (r *registry) detectProviders() { + // Force detection by calling the methods + _ = r.HasCertManager() + _ = r.HasExternalSecrets() + _ = r.HasOTelCollector() +} + +// detectCRD checks if a CRD exists in the cluster. +func (r *registry) detectCRD(group, kind string) bool { + _, err := r.client.RESTMapper().RESTMapping( + schema.GroupKind{Group: group, Kind: kind}, + ) + return err == nil +} + +// initConfigResolver initializes the config resolver on startup. +func (r *registry) initConfigResolver(ctx context.Context) error { + resolver := r.ConfigResolver() + if resolver == nil { + return fmt.Errorf("config resolver not available") + } + + // Start watches for configuration changes + if err := resolver.StartWatches(ctx); err != nil { + return fmt.Errorf("failed to start config watches: %w", err) + } + + return nil +} diff --git a/pkg/platform-sdk/internal/runtime.go b/pkg/platform-sdk/internal/runtime.go new file mode 100644 index 000000000..6bc0a7f3a --- /dev/null +++ b/pkg/platform-sdk/internal/runtime.go @@ -0,0 +1,157 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal contains internal implementations not exported to users. 
+package internal + +import ( + "context" + "fmt" + "sync" + + "github.com/go-logr/logr" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// runtime implements the api.Runtime interface. +// +// The runtime is the main SDK coordinator that: +// - Creates and manages the service registry +// - Initializes all services on Start() +// - Provides ReconcileContext instances on demand +// - Manages lifecycle (Start/Stop) +type runtime struct { + client client.Client + config *api.RuntimeConfig + logger logr.Logger + + // registry holds all services (resolvers, providers, discovery, etc.) + registry *registry + + // started indicates if the runtime has been started + started bool + mu sync.RWMutex + + // stopCh is used to signal shutdown + stopCh chan struct{} +} + +// NewRuntime creates a new runtime implementation. +func NewRuntime(client client.Client, config *api.RuntimeConfig) (api.Runtime, error) { + if client == nil { + return nil, fmt.Errorf("client cannot be nil") + } + + if config == nil { + return nil, fmt.Errorf("config cannot be nil") + } + + r := &runtime{ + client: client, + config: config, + logger: config.Logger, + stopCh: make(chan struct{}), + } + + // Create the service registry + registry, err := newRegistry(client, config, r.logger) + if err != nil { + return nil, fmt.Errorf("failed to create registry: %w", err) + } + r.registry = registry + + return r, nil +} + +// NewReconcileContext creates a context for a single reconciliation. +func (r *runtime) NewReconcileContext(ctx context.Context, namespace, name string) api.ReconcileContext { + logger := r.logger.WithValues("namespace", namespace, "name", name) + + return &reconcileContext{ + ctx: ctx, + namespace: namespace, + name: name, + runtime: r, + registry: r.registry, + logger: logger, + } +} + +// Start initializes the SDK runtime. 
+func (r *runtime) Start(ctx context.Context) error { + r.mu.Lock() + defer r.mu.Unlock() + + if r.started { + return fmt.Errorf("runtime already started") + } + + r.logger.Info("Starting Platform SDK runtime", + "clusterScoped", r.config.ClusterScoped, + "namespace", r.config.Namespace, + "dryRun", r.config.DryRun, + ) + + // Start the registry (initializes services, starts watches) + if err := r.registry.Start(ctx); err != nil { + return fmt.Errorf("failed to start registry: %w", err) + } + + r.started = true + r.logger.Info("Platform SDK runtime started successfully") + + return nil +} + +// Stop gracefully shuts down the SDK. +func (r *runtime) Stop() error { + r.mu.Lock() + defer r.mu.Unlock() + + if !r.started { + return nil + } + + r.logger.Info("Stopping Platform SDK runtime") + + // Signal shutdown + close(r.stopCh) + + // Stop the registry (stops watches, closes connections) + if err := r.registry.Stop(); err != nil { + return fmt.Errorf("failed to stop registry: %w", err) + } + + r.started = false + r.logger.Info("Platform SDK runtime stopped successfully") + + return nil +} + +// GetClient returns the Kubernetes client. +func (r *runtime) GetClient() client.Client { + return r.client +} + +// GetLogger returns the logger. +func (r *runtime) GetLogger() logr.Logger { + return r.logger +} + +// GetEventRecorder returns the event recorder. +func (r *runtime) GetEventRecorder() record.EventRecorder { + return r.config.EventRecorder +} diff --git a/pkg/platform-sdk/sdk.go b/pkg/platform-sdk/sdk.go new file mode 100644 index 000000000..467fa5bae --- /dev/null +++ b/pkg/platform-sdk/sdk.go @@ -0,0 +1,81 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sdk provides the Platform SDK for Kubernetes controllers. +// +// The Platform SDK simplifies controller development by providing: +// - Certificate resolution via cert-manager or self-signed fallback +// - Secret validation with External Secrets Operator integration +// - Service discovery with health checking and caching +// - Resource builders with best practices built-in +// +// See the api package for the public interfaces and usage examples. +package sdk + +import ( + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" + "github.com/splunk/splunk-operator/pkg/platform-sdk/internal" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// NewRuntime creates a new SDK Runtime instance. +// +// The Runtime is the main entry point for the Platform SDK. Create one +// instance per controller and reuse it across reconciliations. 
+// +// Example: +// +// runtime, err := sdk.NewRuntime(mgr.GetClient(), +// sdk.WithClusterScoped(), +// sdk.WithLogger(log), +// ) +// if err != nil { +// return err +// } +// +// if err := runtime.Start(ctx); err != nil { +// return err +// } +// +// The Runtime manages: +// - Service lifecycle (resolvers, providers, discovery) +// - Configuration watching (PlatformConfig, TenantConfig) +// - Provider detection (cert-manager, ESO, OTel) +// - Resource caching and connection pooling +func NewRuntime(client client.Client, opts ...api.RuntimeOption) (api.Runtime, error) { + // Build configuration from options + config := &api.RuntimeConfig{ + ClusterScoped: true, // Default to cluster-scoped + Logger: log.Log.WithName("platform-sdk"), + MetricsPort: 8383, + } + + for _, opt := range opts { + opt(config) + } + + // Create the runtime implementation + return internal.NewRuntime(client, config) +} + +// Re-export option functions for convenience +var ( + WithClusterScoped = api.WithClusterScoped + WithNamespace = api.WithNamespace + WithLogger = api.WithLogger + WithDryRun = api.WithDryRun + WithMetrics = api.WithMetrics + WithEventRecorder = api.WithEventRecorder +) diff --git a/pkg/platform-sdk/services/builders/configmap.go b/pkg/platform-sdk/services/builders/configmap.go new file mode 100644 index 000000000..f029f4f5b --- /dev/null +++ b/pkg/platform-sdk/services/builders/configmap.go @@ -0,0 +1,134 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package builders + +import ( + "fmt" + + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/builders" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ConfigMapBuilder builds ConfigMap resources with a fluent API. +type ConfigMapBuilder struct { + namespace string + ownerName string + + // ConfigMap fields + name string + data map[string]string + binaryData map[string][]byte + labels map[string]string + annotations map[string]string +} + +// NewConfigMapBuilder creates a new ConfigMapBuilder. +func NewConfigMapBuilder(namespace, ownerName string) *ConfigMapBuilder { + return &ConfigMapBuilder{ + namespace: namespace, + ownerName: ownerName, + data: make(map[string]string), + binaryData: make(map[string][]byte), + labels: make(map[string]string), + annotations: make(map[string]string), + } +} + +// WithName sets the ConfigMap name. +func (b *ConfigMapBuilder) WithName(name string) builders.ConfigMapBuilder { + b.name = name + return b +} + +// WithNamespace sets the namespace. +func (b *ConfigMapBuilder) WithNamespace(namespace string) builders.ConfigMapBuilder { + b.namespace = namespace + return b +} + +// WithData sets string data. +func (b *ConfigMapBuilder) WithData(data map[string]string) builders.ConfigMapBuilder { + for k, v := range data { + b.data[k] = v + } + return b +} + +// WithBinaryData sets binary data. +func (b *ConfigMapBuilder) WithBinaryData(binaryData map[string][]byte) builders.ConfigMapBuilder { + for k, v := range binaryData { + b.binaryData[k] = v + } + return b +} + +// WithLabels sets labels. +func (b *ConfigMapBuilder) WithLabels(labels map[string]string) builders.ConfigMapBuilder { + for k, v := range labels { + b.labels[k] = v + } + return b +} + +// WithAnnotations sets annotations. 
+// Merged into previously added annotations.
+func (b *ConfigMapBuilder) WithAnnotations(annotations map[string]string) builders.ConfigMapBuilder {
+	for k, v := range annotations {
+		b.annotations[k] = v
+	}
+	return b
+}
+
+// Build constructs the ConfigMap.
+//
+// Returns an error if no name was set or if neither string nor binary
+// data was provided.
+//
+// NOTE(review): Data, BinaryData, and Annotations alias the builder's
+// internal maps; mutating the builder after Build also mutates the
+// returned object. Consider copying if that is not intended — verify.
+func (b *ConfigMapBuilder) Build() (*corev1.ConfigMap, error) {
+	if b.name == "" {
+		return nil, fmt.Errorf("name is required")
+	}
+	if len(b.data) == 0 && len(b.binaryData) == 0 {
+		return nil, fmt.Errorf("at least one data entry is required")
+	}
+
+	// Build labels
+	labels := b.buildLabels()
+
+	configMap := &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        b.name,
+			Namespace:   b.namespace,
+			Labels:      labels,
+			Annotations: b.annotations,
+		},
+		Data:       b.data,
+		BinaryData: b.binaryData,
+	}
+
+	return configMap, nil
+}
+
+// buildLabels constructs the label map: the standard
+// app.kubernetes.io/* labels, with user labels merged on top.
+// Because user labels are merged last, they can override the standard
+// labels.
+func (b *ConfigMapBuilder) buildLabels() map[string]string {
+	labels := map[string]string{
+		"app.kubernetes.io/name":       b.name,
+		"app.kubernetes.io/instance":   b.ownerName,
+		"app.kubernetes.io/managed-by": "splunk-operator",
+	}
+
+	// Merge user labels
+	for k, v := range b.labels {
+		labels[k] = v
+	}
+
+	return labels
+}
diff --git a/pkg/platform-sdk/services/builders/configmap_test.go b/pkg/platform-sdk/services/builders/configmap_test.go
new file mode 100644
index 000000000..8c99ff161
--- /dev/null
+++ b/pkg/platform-sdk/services/builders/configmap_test.go
@@ -0,0 +1,186 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+package builders
+
+import (
+	"testing"
+)
+
+// TestConfigMapBuilder_Build is a table-driven test covering validation
+// errors, string/binary data handling, label merging, and the
+// accumulating behavior of repeated WithData calls.
+func TestConfigMapBuilder_Build(t *testing.T) {
+	tests := []struct {
+		name      string
+		setupFunc func(*ConfigMapBuilder)
+		wantErr   bool
+		errMsg    string
+		validate  func(*testing.T, *ConfigMapBuilder)
+	}{
+		{
+			name: "valid basic configmap",
+			setupFunc: func(b *ConfigMapBuilder) {
+				b.WithName("test-cm").
+					WithData(map[string]string{"key": "value"})
+			},
+			wantErr: false,
+			validate: func(t *testing.T, b *ConfigMapBuilder) {
+				cm, err := b.Build()
+				if err != nil {
+					t.Fatalf("Build() failed: %v", err)
+				}
+				if cm.Name != "test-cm" {
+					t.Errorf("Name = %v, want test-cm", cm.Name)
+				}
+				if cm.Data["key"] != "value" {
+					t.Error("Data not set correctly")
+				}
+			},
+		},
+		{
+			name: "missing name",
+			setupFunc: func(b *ConfigMapBuilder) {
+				b.WithData(map[string]string{"key": "value"})
+			},
+			wantErr: true,
+			errMsg:  "name is required",
+		},
+		{
+			name: "missing data",
+			setupFunc: func(b *ConfigMapBuilder) {
+				b.WithName("test-cm")
+			},
+			wantErr: true,
+			errMsg:  "at least one data entry is required",
+		},
+		{
+			name: "with binary data",
+			setupFunc: func(b *ConfigMapBuilder) {
+				b.WithName("test-cm").
+					WithBinaryData(map[string][]byte{"binary": []byte{0x00, 0x01}})
+			},
+			wantErr: false,
+			validate: func(t *testing.T, b *ConfigMapBuilder) {
+				cm, err := b.Build()
+				if err != nil {
+					t.Fatalf("Build() failed: %v", err)
+				}
+				if len(cm.BinaryData["binary"]) != 2 {
+					t.Error("Binary data not set correctly")
+				}
+			},
+		},
+		{
+			name: "with both string and binary data",
+			setupFunc: func(b *ConfigMapBuilder) {
+				b.WithName("test-cm").
+					WithData(map[string]string{"text": "value"}).
+					WithBinaryData(map[string][]byte{"binary": []byte{0x00}})
+			},
+			wantErr: false,
+			validate: func(t *testing.T, b *ConfigMapBuilder) {
+				cm, err := b.Build()
+				if err != nil {
+					t.Fatalf("Build() failed: %v", err)
+				}
+				if cm.Data["text"] != "value" {
+					t.Error("String data not set")
+				}
+				if len(cm.BinaryData["binary"]) != 1 {
+					t.Error("Binary data not set")
+				}
+			},
+		},
+		{
+			name: "multiple data entries",
+			setupFunc: func(b *ConfigMapBuilder) {
+				b.WithName("test-cm").
+					WithData(map[string]string{
+						"config.yaml": "content1",
+						"app.conf":    "content2",
+					})
+			},
+			wantErr: false,
+			validate: func(t *testing.T, b *ConfigMapBuilder) {
+				cm, err := b.Build()
+				if err != nil {
+					t.Fatalf("Build() failed: %v", err)
+				}
+				if len(cm.Data) != 2 {
+					t.Errorf("Data count = %v, want 2", len(cm.Data))
+				}
+			},
+		},
+		{
+			name: "with labels",
+			setupFunc: func(b *ConfigMapBuilder) {
+				b.WithName("test-cm").
+					WithData(map[string]string{"key": "value"}).
+					WithLabels(map[string]string{"custom": "label"})
+			},
+			wantErr: false,
+			validate: func(t *testing.T, b *ConfigMapBuilder) {
+				cm, err := b.Build()
+				if err != nil {
+					t.Fatalf("Build() failed: %v", err)
+				}
+				if cm.Labels["custom"] != "label" {
+					t.Error("Custom label not set")
+				}
+			},
+		},
+		{
+			name: "standard labels applied",
+			setupFunc: func(b *ConfigMapBuilder) {
+				b.WithName("test-cm").
+					WithData(map[string]string{"key": "value"})
+			},
+			wantErr: false,
+			validate: func(t *testing.T, b *ConfigMapBuilder) {
+				cm, err := b.Build()
+				if err != nil {
+					t.Fatalf("Build() failed: %v", err)
+				}
+				if cm.Labels["app.kubernetes.io/name"] != "test-cm" {
+					t.Error("Standard name label not set")
+				}
+				if cm.Labels["app.kubernetes.io/managed-by"] != "splunk-operator" {
+					t.Error("Standard managed-by label not set")
+				}
+			},
+		},
+		{
+			// Verifies that WithData merges rather than replaces.
+			name: "incremental data addition",
+			setupFunc: func(b *ConfigMapBuilder) {
+				b.WithName("test-cm").
+					WithData(map[string]string{"key1": "value1"}).
+					WithData(map[string]string{"key2": "value2"})
+			},
+			wantErr: false,
+			validate: func(t *testing.T, b *ConfigMapBuilder) {
+				cm, err := b.Build()
+				if err != nil {
+					t.Fatalf("Build() failed: %v", err)
+				}
+				if cm.Data["key1"] != "value1" || cm.Data["key2"] != "value2" {
+					t.Error("Incremental data addition failed")
+				}
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			builder := NewConfigMapBuilder("default", "test-owner")
+			tt.setupFunc(builder)
+
+			if tt.wantErr {
+				_, err := builder.Build()
+				if err == nil {
+					t.Error("Build() expected error but got none")
+				} else if tt.errMsg != "" && err.Error() != tt.errMsg {
+					t.Errorf("Build() error = %v, want %v", err.Error(), tt.errMsg)
+				}
+			} else if tt.validate != nil {
+				tt.validate(t, builder)
+			}
+		})
+	}
+}
diff --git a/pkg/platform-sdk/services/builders/deployment.go b/pkg/platform-sdk/services/builders/deployment.go
new file mode 100644
index 000000000..3058fcb86
--- /dev/null
+++ b/pkg/platform-sdk/services/builders/deployment.go
@@ -0,0 +1,374 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package builders
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/splunk/splunk-operator/pkg/platform-sdk/api/builders"
+	"github.com/splunk/splunk-operator/pkg/platform-sdk/api/certificate"
+	"github.com/splunk/splunk-operator/pkg/platform-sdk/api/secret"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// DeploymentBuilder builds Deployment resources with a fluent API.
+//
+// With* methods mutate the builder in place and return it for chaining;
+// Build assembles and returns the Deployment object.
+type DeploymentBuilder struct {
+	namespace     string
+	ownerName     string
+	observability ObservabilityService
+	// NOTE(review): storing a context in a struct is discouraged in Go,
+	// and no setter for ctx is visible in this file; unless it is
+	// assigned elsewhere, the observability branch in Build (which
+	// requires ctx != nil) is unreachable — verify.
+	ctx context.Context
+
+	// Deployment fields
+	name        string
+	replicas    *int32
+	labels      map[string]string
+	annotations map[string]string
+
+	// Container fields (single application container)
+	image        string
+	ports        []corev1.ContainerPort
+	command      []string
+	args         []string
+	env          []corev1.EnvVar
+	envFrom      []corev1.EnvFromSource
+	volumeMounts []corev1.VolumeMount
+	resources    corev1.ResourceRequirements
+
+	// Pod fields
+	volumes            []corev1.Volume
+	serviceAccountName string
+	securityContext    *corev1.PodSecurityContext
+
+	// SDK-managed resources, mounted as secret/configmap volumes
+	certificates []*certificate.Ref
+	secrets      []*secret.Ref
+	configMaps   []string
+
+	// Observability
+	addObservability bool
+}
+
+// NewDeploymentBuilder creates a new DeploymentBuilder.
+// Defaults to a single replica; label/annotation maps are pre-allocated
+// for the merging With* methods.
+func NewDeploymentBuilder(namespace, ownerName string, observability ObservabilityService) *DeploymentBuilder {
+	return &DeploymentBuilder{
+		namespace:     namespace,
+		ownerName:     ownerName,
+		observability: observability,
+		labels:        make(map[string]string),
+		annotations:   make(map[string]string),
+		replicas:      int32Ptr(1),
+	}
+}
+
+// WithName sets the Deployment name.
+func (b *DeploymentBuilder) WithName(name string) builders.DeploymentBuilder {
+	b.name = name
+	return b
+}
+
+// WithNamespace sets the namespace.
+func (b *DeploymentBuilder) WithNamespace(namespace string) builders.DeploymentBuilder {
+	b.namespace = namespace
+	return b
+}
+
+// WithReplicas sets the replica count.
+func (b *DeploymentBuilder) WithReplicas(replicas int32) builders.DeploymentBuilder {
+	b.replicas = &replicas
+	return b
+}
+
+// WithImage sets the container image.
+func (b *DeploymentBuilder) WithImage(image string) builders.DeploymentBuilder {
+	b.image = image
+	return b
+}
+
+// WithPorts sets the container ports (replaces any previous value).
+func (b *DeploymentBuilder) WithPorts(ports []corev1.ContainerPort) builders.DeploymentBuilder {
+	b.ports = ports
+	return b
+}
+
+// WithCommand sets the container command.
+func (b *DeploymentBuilder) WithCommand(command []string) builders.DeploymentBuilder {
+	b.command = command
+	return b
+}
+
+// WithArgs sets the container args.
+func (b *DeploymentBuilder) WithArgs(args []string) builders.DeploymentBuilder {
+	b.args = args
+	return b
+}
+
+// WithSecret adds a secret reference; Build mounts each referenced
+// secret as a volume. Repeated calls accumulate.
+func (b *DeploymentBuilder) WithSecret(ref *secret.Ref) builders.DeploymentBuilder {
+	b.secrets = append(b.secrets, ref)
+	return b
+}
+
+// WithCertificate adds a certificate reference; Build mounts the
+// certificate's backing secret under /etc/certs. Repeated calls
+// accumulate.
+func (b *DeploymentBuilder) WithCertificate(ref *certificate.Ref) builders.DeploymentBuilder {
+	b.certificates = append(b.certificates, ref)
+	return b
+}
+
+// WithConfigMap adds a config map reference, mounted as a volume by
+// Build. Repeated calls accumulate.
+func (b *DeploymentBuilder) WithConfigMap(name string) builders.DeploymentBuilder {
+	b.configMaps = append(b.configMaps, name)
+	return b
+}
+
+// WithObservability enables observability annotations.
+func (b *DeploymentBuilder) WithObservability() builders.DeploymentBuilder {
+	b.addObservability = true
+	return b
+}
+
+// WithEnv adds an environment variable. Repeated calls accumulate.
+func (b *DeploymentBuilder) WithEnv(env corev1.EnvVar) builders.DeploymentBuilder {
+	b.env = append(b.env, env)
+	return b
+}
+
+// WithEnvFrom adds an environment source. Repeated calls accumulate.
+func (b *DeploymentBuilder) WithEnvFrom(envFrom corev1.EnvFromSource) builders.DeploymentBuilder {
+	b.envFrom = append(b.envFrom, envFrom)
+	return b
+}
+
+// WithVolume adds a volume.
+func (b *DeploymentBuilder) WithVolume(volume corev1.Volume) builders.DeploymentBuilder { + b.volumes = append(b.volumes, volume) + return b +} + +// WithVolumeMount adds a volume mount. +func (b *DeploymentBuilder) WithVolumeMount(mount corev1.VolumeMount) builders.DeploymentBuilder { + b.volumeMounts = append(b.volumeMounts, mount) + return b +} + +// WithResources sets resource requirements. +func (b *DeploymentBuilder) WithResources(resources corev1.ResourceRequirements) builders.DeploymentBuilder { + b.resources = resources + return b +} + +// WithLabels sets labels. +func (b *DeploymentBuilder) WithLabels(labels map[string]string) builders.DeploymentBuilder { + for k, v := range labels { + b.labels[k] = v + } + return b +} + +// WithAnnotations sets annotations. +func (b *DeploymentBuilder) WithAnnotations(annotations map[string]string) builders.DeploymentBuilder { + for k, v := range annotations { + b.annotations[k] = v + } + return b +} + +// Build constructs the Deployment. +func (b *DeploymentBuilder) Build() (*appsv1.Deployment, error) { + if b.name == "" { + return nil, fmt.Errorf("name is required") + } + if b.image == "" { + return nil, fmt.Errorf("image is required") + } + + // Add default labels + labels := b.buildLabels() + annotations := b.buildAnnotations() + + // Add observability annotations if requested + if b.addObservability && b.observability != nil && b.ctx != nil { + obsAnnotations, err := b.observability.GetObservabilityAnnotations(b.ctx, b.namespace) + if err != nil { + return nil, fmt.Errorf("failed to get observability annotations: %w", err) + } + for k, v := range obsAnnotations { + annotations[k] = v + } + } + + // Add volumes for SDK-managed resources + volumes := b.buildVolumes() + volumeMounts := b.buildVolumeMounts() + + // Build container + container := corev1.Container{ + Name: b.name, + Image: b.image, + Ports: b.ports, + Command: b.command, + Args: b.args, + Env: b.env, + EnvFrom: b.envFrom, + VolumeMounts: volumeMounts, + 
Resources: b.resources, + } + + // Build pod template + podTemplate := corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + Annotations: annotations, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{container}, + Volumes: volumes, + }, + } + + if b.serviceAccountName != "" { + podTemplate.Spec.ServiceAccountName = b.serviceAccountName + } + + if b.securityContext != nil { + podTemplate.Spec.SecurityContext = b.securityContext + } + + // Build Deployment + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.name, + Namespace: b.namespace, + Labels: labels, + Annotations: annotations, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: b.replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: podTemplate, + }, + } + + return deployment, nil +} + +// buildLabels constructs the label map. +func (b *DeploymentBuilder) buildLabels() map[string]string { + labels := map[string]string{ + "app.kubernetes.io/name": b.name, + "app.kubernetes.io/instance": b.ownerName, + "app.kubernetes.io/managed-by": "splunk-operator", + } + + // Merge user labels + for k, v := range b.labels { + labels[k] = v + } + + return labels +} + +// buildAnnotations constructs the annotation map. +func (b *DeploymentBuilder) buildAnnotations() map[string]string { + annotations := make(map[string]string) + + // Merge user annotations + for k, v := range b.annotations { + annotations[k] = v + } + + return annotations +} + +// buildVolumes constructs volumes from SDK-managed resources. +func (b *DeploymentBuilder) buildVolumes() []corev1.Volume { + volumes := make([]corev1.Volume, 0, len(b.volumes)) + + // Add user-provided volumes + volumes = append(volumes, b.volumes...) 
+ + // Add certificate volumes + for i, cert := range b.certificates { + volumeName := fmt.Sprintf("cert-%d", i) + if cert.SecretName != "" { + volumeName = cert.SecretName + } + + volumes = append(volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: cert.SecretName, + }, + }, + }) + } + + // Add secret volumes + for _, sec := range b.secrets { + volumes = append(volumes, corev1.Volume{ + Name: sec.SecretName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: sec.SecretName, + }, + }, + }) + } + + // Add config map volumes + for _, cm := range b.configMaps { + volumes = append(volumes, corev1.Volume{ + Name: cm, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: cm, + }, + }, + }, + }) + } + + return volumes +} + +// buildVolumeMounts constructs volume mounts. +func (b *DeploymentBuilder) buildVolumeMounts() []corev1.VolumeMount { + mounts := make([]corev1.VolumeMount, 0, len(b.volumeMounts)) + + // Add user-provided mounts + mounts = append(mounts, b.volumeMounts...) + + // Add default mounts for certificates + for i, cert := range b.certificates { + volumeName := fmt.Sprintf("cert-%d", i) + if cert.SecretName != "" { + volumeName = cert.SecretName + } + + mounts = append(mounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/certs/%s", volumeName), + ReadOnly: true, + }) + } + + return mounts +} diff --git a/pkg/platform-sdk/services/builders/service.go b/pkg/platform-sdk/services/builders/service.go new file mode 100644 index 000000000..d48938aac --- /dev/null +++ b/pkg/platform-sdk/services/builders/service.go @@ -0,0 +1,162 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package builders
+
+import (
+	"fmt"
+
+	"github.com/splunk/splunk-operator/pkg/platform-sdk/api/builders"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ServiceBuilder builds Service resources with a fluent API.
+//
+// With* methods mutate the builder in place and return it for chaining;
+// Build assembles and returns the Service object.
+type ServiceBuilder struct {
+	namespace string
+	ownerName string
+
+	// Service fields
+	name        string
+	serviceType corev1.ServiceType
+	ports       []corev1.ServicePort
+	selector    map[string]string
+	labels      map[string]string
+	annotations map[string]string
+
+	// Discovery labels
+	addDiscoveryLabels bool
+}
+
+// NewServiceBuilder creates a new ServiceBuilder.
+// Defaults the service type to ClusterIP; label/annotation maps are
+// pre-allocated for the merging With* methods.
+func NewServiceBuilder(namespace, ownerName string) *ServiceBuilder {
+	return &ServiceBuilder{
+		namespace:   namespace,
+		ownerName:   ownerName,
+		serviceType: corev1.ServiceTypeClusterIP,
+		labels:      make(map[string]string),
+		annotations: make(map[string]string),
+	}
+}
+
+// WithName sets the Service name.
+func (b *ServiceBuilder) WithName(name string) builders.ServiceBuilder {
+	b.name = name
+	return b
+}
+
+// WithNamespace sets the namespace.
+func (b *ServiceBuilder) WithNamespace(namespace string) builders.ServiceBuilder {
+	b.namespace = namespace
+	return b
+}
+
+// WithType sets the service type.
+func (b *ServiceBuilder) WithType(serviceType corev1.ServiceType) builders.ServiceBuilder {
+	b.serviceType = serviceType
+	return b
+}
+
+// WithPorts sets the service ports (replaces any previous value).
+func (b *ServiceBuilder) WithPorts(ports []corev1.ServicePort) builders.ServiceBuilder {
+	b.ports = ports
+	return b
+}
+
+// WithSelector sets the pod selector.
+// If never called, Build generates a default name/instance selector.
+func (b *ServiceBuilder) WithSelector(selector map[string]string) builders.ServiceBuilder {
+	b.selector = selector
+	return b
+}
+
+// WithLabels sets labels. Entries merge into previously added labels.
+func (b *ServiceBuilder) WithLabels(labels map[string]string) builders.ServiceBuilder {
+	for k, v := range labels {
+		b.labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations sets annotations. Entries merge into previously
+// added annotations.
+func (b *ServiceBuilder) WithAnnotations(annotations map[string]string) builders.ServiceBuilder {
+	for k, v := range annotations {
+		b.annotations[k] = v
+	}
+	return b
+}
+
+// WithDiscoveryLabels adds standard labels for service discovery.
+func (b *ServiceBuilder) WithDiscoveryLabels() builders.ServiceBuilder {
+	b.addDiscoveryLabels = true
+	return b
+}
+
+// Build constructs the Service.
+//
+// Returns an error if no name was set or no ports were provided. When
+// no explicit selector was given, a default selector of the standard
+// name/instance labels is used.
+func (b *ServiceBuilder) Build() (*corev1.Service, error) {
+	if b.name == "" {
+		return nil, fmt.Errorf("name is required")
+	}
+	if len(b.ports) == 0 {
+		return nil, fmt.Errorf("at least one port is required")
+	}
+
+	// Build labels
+	labels := b.buildLabels()
+
+	// Build selector - default to same as labels if not specified
+	selector := b.selector
+	if selector == nil {
+		selector = map[string]string{
+			"app.kubernetes.io/name":     b.name,
+			"app.kubernetes.io/instance": b.ownerName,
+		}
+	}
+
+	service := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        b.name,
+			Namespace:   b.namespace,
+			Labels:      labels,
+			Annotations: b.annotations,
+		},
+		Spec: corev1.ServiceSpec{
+			Type:     b.serviceType,
+			Ports:    b.ports,
+			Selector: selector,
+		},
+	}
+
+	return service, nil
+}
+
+// buildLabels constructs the label map.
+// Standard app.kubernetes.io/* labels, plus the discovery label when
+// requested, with user labels merged on top (user labels may override).
+func (b *ServiceBuilder) buildLabels() map[string]string {
+	labels := map[string]string{
+		"app.kubernetes.io/name":       b.name,
+		"app.kubernetes.io/instance":   b.ownerName,
+		"app.kubernetes.io/managed-by": "splunk-operator",
+	}
+
+	// Add discovery labels if requested
+	if b.addDiscoveryLabels {
+		labels["splunk.com/discoverable"] = "true"
+	}
+
+	// Merge user labels
+	for k, v := range b.labels {
+		labels[k] = v
+	}
+
+	return labels
+}
diff --git a/pkg/platform-sdk/services/builders/service_test.go b/pkg/platform-sdk/services/builders/service_test.go
new file mode 100644
index 000000000..a0e2809e2
--- /dev/null
+++ b/pkg/platform-sdk/services/builders/service_test.go
@@ -0,0 +1,193 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+
+package builders
+
+import (
+	"testing"
+
+	corev1 "k8s.io/api/core/v1"
+)
+
+// TestServiceBuilder_Build is a table-driven test covering validation
+// errors, service type, selectors (custom and default), discovery
+// labels, and standard label application.
+func TestServiceBuilder_Build(t *testing.T) {
+	tests := []struct {
+		name      string
+		setupFunc func(*ServiceBuilder)
+		wantErr   bool
+		errMsg    string
+		validate  func(*testing.T, *ServiceBuilder)
+	}{
+		{
+			name: "valid basic service",
+			setupFunc: func(b *ServiceBuilder) {
+				b.WithName("test-svc").
+					WithPorts([]corev1.ServicePort{
+						{Name: "web", Port: 8000},
+					})
+			},
+			wantErr: false,
+			validate: func(t *testing.T, b *ServiceBuilder) {
+				svc, err := b.Build()
+				if err != nil {
+					t.Fatalf("Build() failed: %v", err)
+				}
+				if svc.Name != "test-svc" {
+					t.Errorf("Name = %v, want test-svc", svc.Name)
+				}
+				if len(svc.Spec.Ports) != 1 {
+					t.Errorf("Ports count = %v, want 1", len(svc.Spec.Ports))
+				}
+			},
+		},
+		{
+			name: "missing name",
+			setupFunc: func(b *ServiceBuilder) {
+				b.WithPorts([]corev1.ServicePort{{Name: "web", Port: 8000}})
+			},
+			wantErr: true,
+			errMsg:  "name is required",
+		},
+		{
+			name: "missing ports",
+			setupFunc: func(b *ServiceBuilder) {
+				b.WithName("test-svc")
+			},
+			wantErr: true,
+			errMsg:  "at least one port is required",
+		},
+		{
+			name: "with service type",
+			setupFunc: func(b *ServiceBuilder) {
+				b.WithName("test-svc").
+					WithType(corev1.ServiceTypeNodePort).
+					WithPorts([]corev1.ServicePort{{Name: "web", Port: 8000}})
+			},
+			wantErr: false,
+			validate: func(t *testing.T, b *ServiceBuilder) {
+				svc, err := b.Build()
+				if err != nil {
+					t.Fatalf("Build() failed: %v", err)
+				}
+				if svc.Spec.Type != corev1.ServiceTypeNodePort {
+					t.Errorf("Type = %v, want NodePort", svc.Spec.Type)
+				}
+			},
+		},
+		{
+			name: "with custom selector",
+			setupFunc: func(b *ServiceBuilder) {
+				b.WithName("test-svc").
+					WithPorts([]corev1.ServicePort{{Name: "web", Port: 8000}}).
+					WithSelector(map[string]string{"app": "custom"})
+			},
+			wantErr: false,
+			validate: func(t *testing.T, b *ServiceBuilder) {
+				svc, err := b.Build()
+				if err != nil {
+					t.Fatalf("Build() failed: %v", err)
+				}
+				if svc.Spec.Selector["app"] != "custom" {
+					t.Error("Custom selector not applied")
+				}
+			},
+		},
+		{
+			name: "with discovery labels",
+			setupFunc: func(b *ServiceBuilder) {
+				b.WithName("test-svc").
+					WithPorts([]corev1.ServicePort{{Name: "web", Port: 8000}}).
+					WithDiscoveryLabels()
+			},
+			wantErr: false,
+			validate: func(t *testing.T, b *ServiceBuilder) {
+				svc, err := b.Build()
+				if err != nil {
+					t.Fatalf("Build() failed: %v", err)
+				}
+				if svc.Labels["splunk.com/discoverable"] != "true" {
+					t.Error("Discovery label not set")
+				}
+			},
+		},
+		{
+			name: "default selector generated",
+			setupFunc: func(b *ServiceBuilder) {
+				b.WithName("test-svc").
+					WithPorts([]corev1.ServicePort{{Name: "web", Port: 8000}})
+			},
+			wantErr: false,
+			validate: func(t *testing.T, b *ServiceBuilder) {
+				svc, err := b.Build()
+				if err != nil {
+					t.Fatalf("Build() failed: %v", err)
+				}
+				if svc.Spec.Selector["app.kubernetes.io/name"] != "test-svc" {
+					t.Error("Default selector not generated")
+				}
+			},
+		},
+		{
+			name: "multiple ports",
+			setupFunc: func(b *ServiceBuilder) {
+				b.WithName("test-svc").
+					WithPorts([]corev1.ServicePort{
+						{Name: "web", Port: 8000},
+						{Name: "mgmt", Port: 8089},
+						{Name: "hec", Port: 8088},
+					})
+			},
+			wantErr: false,
+			validate: func(t *testing.T, b *ServiceBuilder) {
+				svc, err := b.Build()
+				if err != nil {
+					t.Fatalf("Build() failed: %v", err)
+				}
+				if len(svc.Spec.Ports) != 3 {
+					t.Errorf("Ports count = %v, want 3", len(svc.Spec.Ports))
+				}
+			},
+		},
+		{
+			name: "standard labels applied",
+			setupFunc: func(b *ServiceBuilder) {
+				b.WithName("test-svc").
+					WithPorts([]corev1.ServicePort{{Name: "web", Port: 8000}})
+			},
+			wantErr: false,
+			validate: func(t *testing.T, b *ServiceBuilder) {
+				svc, err := b.Build()
+				if err != nil {
+					t.Fatalf("Build() failed: %v", err)
+				}
+				if svc.Labels["app.kubernetes.io/managed-by"] != "splunk-operator" {
+					t.Error("Standard managed-by label not set")
+				}
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			builder := NewServiceBuilder("default", "test-owner")
+			tt.setupFunc(builder)
+
+			if tt.wantErr {
+				_, err := builder.Build()
+				if err == nil {
+					t.Error("Build() expected error but got none")
+				} else if tt.errMsg != "" && err.Error() != tt.errMsg {
+					t.Errorf("Build() error = %v, want %v", err.Error(), tt.errMsg)
+				}
+			} else if tt.validate != nil {
+				tt.validate(t, builder)
+			}
+		})
+	}
+}
+
+// TestServiceBuilder_Defaults verifies constructor defaults
+// (ClusterIP service type).
+func TestServiceBuilder_Defaults(t *testing.T) {
+	builder := NewServiceBuilder("test-ns", "test-owner")
+
+	if builder.serviceType != corev1.ServiceTypeClusterIP {
+		t.Errorf("default service type = %v, want ClusterIP", builder.serviceType)
+	}
+}
diff --git a/pkg/platform-sdk/services/builders/statefulset.go b/pkg/platform-sdk/services/builders/statefulset.go
new file mode 100644
index 000000000..f84df4156
--- /dev/null
+++ b/pkg/platform-sdk/services/builders/statefulset.go
@@ -0,0 +1,556 @@
+// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package builders
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/splunk/splunk-operator/pkg/platform-sdk/api/builders"
+	"github.com/splunk/splunk-operator/pkg/platform-sdk/api/certificate"
+	"github.com/splunk/splunk-operator/pkg/platform-sdk/api/secret"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ObservabilityService interface to avoid import cycle.
+// Implementations decide whether, and with which annotations, workloads
+// in a namespace should be wired for observability.
+type ObservabilityService interface {
+	ShouldAddObservability(ctx context.Context, namespace string) (bool, error)
+	GetObservabilityAnnotations(ctx context.Context, namespace string) (map[string]string, error)
+}
+
+// StatefulSetBuilder builds StatefulSet resources with a fluent API.
+//
+// With* methods mutate the builder in place and return it for chaining;
+// Build assembles and returns the StatefulSet object.
+type StatefulSetBuilder struct {
+	namespace     string
+	ownerName     string
+	observability ObservabilityService
+	// NOTE(review): storing a context in a struct is discouraged in Go,
+	// and no setter for ctx is visible in this file; unless it is
+	// assigned elsewhere, the observability branch in Build (which
+	// requires ctx != nil) is unreachable — verify.
+	ctx context.Context
+
+	// StatefulSet fields
+	name        string
+	replicas    *int32
+	labels      map[string]string
+	annotations map[string]string
+
+	// Container fields (single application container)
+	image        string
+	ports        []corev1.ContainerPort
+	command      []string
+	args         []string
+	env          []corev1.EnvVar
+	envFrom      []corev1.EnvFromSource
+	volumeMounts []corev1.VolumeMount
+	resources    corev1.ResourceRequirements
+
+	// Pod fields
+	volumes                       []corev1.Volume
+	serviceAccountName            string
+	podSecurityContext            *corev1.PodSecurityContext
+	affinity                      *corev1.Affinity
+	terminationGracePeriodSeconds *int64
+	imagePullPolicy               corev1.PullPolicy
+	priorityClassName             string
+
+	// Container security and probes
+	containerSecurityContext *corev1.SecurityContext
+	livenessProbe            *corev1.Probe
+	readinessProbe           *corev1.Probe
+	startupProbe             *corev1.Probe
+
+	// StatefulSet-specific
+	serviceName          string
+	volumeClaimTemplates []corev1.PersistentVolumeClaim
+	updateStrategy       *appsv1.StatefulSetUpdateStrategy
+	pvcRetentionPolicy   *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy
+
+	// SDK-managed resources, mounted as secret/configmap volumes
+	certificates []*certificate.Ref
+	secrets      []*secret.Ref
+	configMaps   []string
+
+	// Observability
+	addObservability bool
+}
+
+// NewStatefulSetBuilder creates a new StatefulSetBuilder.
+// Defaults to a single replica; label/annotation maps are pre-allocated
+// for the merging With* methods.
+func NewStatefulSetBuilder(namespace, ownerName string, observability ObservabilityService) *StatefulSetBuilder {
+	return &StatefulSetBuilder{
+		namespace:     namespace,
+		ownerName:     ownerName,
+		observability: observability,
+		labels:        make(map[string]string),
+		annotations:   make(map[string]string),
+		replicas:      int32Ptr(1),
+	}
+}
+
+// WithName sets the StatefulSet name.
+func (b *StatefulSetBuilder) WithName(name string) builders.StatefulSetBuilder {
+	b.name = name
+	return b
+}
+
+// WithNamespace sets the namespace.
+func (b *StatefulSetBuilder) WithNamespace(namespace string) builders.StatefulSetBuilder {
+	b.namespace = namespace
+	return b
+}
+
+// WithReplicas sets the replica count.
+func (b *StatefulSetBuilder) WithReplicas(replicas int32) builders.StatefulSetBuilder {
+	b.replicas = &replicas
+	return b
+}
+
+// WithImage sets the container image.
+func (b *StatefulSetBuilder) WithImage(image string) builders.StatefulSetBuilder {
+	b.image = image
+	return b
+}
+
+// WithPorts sets the container ports (replaces any previous value).
+func (b *StatefulSetBuilder) WithPorts(ports []corev1.ContainerPort) builders.StatefulSetBuilder {
+	b.ports = ports
+	return b
+}
+
+// WithCommand sets the container command.
+func (b *StatefulSetBuilder) WithCommand(command []string) builders.StatefulSetBuilder {
+	b.command = command
+	return b
+}
+
+// WithArgs sets the container args.
+func (b *StatefulSetBuilder) WithArgs(args []string) builders.StatefulSetBuilder {
+	b.args = args
+	return b
+}
+
+// WithSecret adds a secret reference; Build mounts each referenced
+// secret as a volume. Repeated calls accumulate.
+func (b *StatefulSetBuilder) WithSecret(ref *secret.Ref) builders.StatefulSetBuilder {
+	b.secrets = append(b.secrets, ref)
+	return b
+}
+
+// WithCertificate adds a certificate reference.
+// Build mounts the certificate's backing secret under /etc/certs.
+// Repeated calls accumulate.
+func (b *StatefulSetBuilder) WithCertificate(ref *certificate.Ref) builders.StatefulSetBuilder {
+	b.certificates = append(b.certificates, ref)
+	return b
+}
+
+// WithConfigMap adds a config map reference, mounted as a volume by
+// Build. Repeated calls accumulate.
+func (b *StatefulSetBuilder) WithConfigMap(name string) builders.StatefulSetBuilder {
+	b.configMaps = append(b.configMaps, name)
+	return b
+}
+
+// WithObservability enables observability annotations.
+func (b *StatefulSetBuilder) WithObservability() builders.StatefulSetBuilder {
+	b.addObservability = true
+	return b
+}
+
+// WithEnv adds an environment variable. Repeated calls accumulate.
+func (b *StatefulSetBuilder) WithEnv(env corev1.EnvVar) builders.StatefulSetBuilder {
+	b.env = append(b.env, env)
+	return b
+}
+
+// WithEnvFrom adds an environment source. Repeated calls accumulate.
+func (b *StatefulSetBuilder) WithEnvFrom(envFrom corev1.EnvFromSource) builders.StatefulSetBuilder {
+	b.envFrom = append(b.envFrom, envFrom)
+	return b
+}
+
+// WithVolume adds a volume. Repeated calls accumulate.
+func (b *StatefulSetBuilder) WithVolume(volume corev1.Volume) builders.StatefulSetBuilder {
+	b.volumes = append(b.volumes, volume)
+	return b
+}
+
+// WithVolumeMount adds a volume mount. Repeated calls accumulate.
+func (b *StatefulSetBuilder) WithVolumeMount(mount corev1.VolumeMount) builders.StatefulSetBuilder {
+	b.volumeMounts = append(b.volumeMounts, mount)
+	return b
+}
+
+// WithResources sets resource requirements.
+func (b *StatefulSetBuilder) WithResources(resources corev1.ResourceRequirements) builders.StatefulSetBuilder {
+	b.resources = resources
+	return b
+}
+
+// WithLabels sets labels. Entries merge into previously added labels.
+func (b *StatefulSetBuilder) WithLabels(labels map[string]string) builders.StatefulSetBuilder {
+	for k, v := range labels {
+		b.labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations sets annotations. Entries merge into previously
+// added annotations.
+func (b *StatefulSetBuilder) WithAnnotations(annotations map[string]string) builders.StatefulSetBuilder {
+	for k, v := range annotations {
+		b.annotations[k] = v
+	}
+	return b
+}
+
+// WithPodSecurityContext sets the pod security context.
+func (b *StatefulSetBuilder) WithPodSecurityContext(psc *corev1.PodSecurityContext) builders.StatefulSetBuilder {
+	b.podSecurityContext = psc
+	return b
+}
+
+// WithSecurityContext sets the container security context.
+func (b *StatefulSetBuilder) WithSecurityContext(sc *corev1.SecurityContext) builders.StatefulSetBuilder {
+	b.containerSecurityContext = sc
+	return b
+}
+
+// WithAffinity sets pod affinity rules.
+func (b *StatefulSetBuilder) WithAffinity(affinity *corev1.Affinity) builders.StatefulSetBuilder {
+	b.affinity = affinity
+	return b
+}
+
+// WithLivenessProbe sets the liveness probe.
+func (b *StatefulSetBuilder) WithLivenessProbe(probe *corev1.Probe) builders.StatefulSetBuilder {
+	b.livenessProbe = probe
+	return b
+}
+
+// WithReadinessProbe sets the readiness probe.
+func (b *StatefulSetBuilder) WithReadinessProbe(probe *corev1.Probe) builders.StatefulSetBuilder {
+	b.readinessProbe = probe
+	return b
+}
+
+// WithStartupProbe sets the startup probe.
+func (b *StatefulSetBuilder) WithStartupProbe(probe *corev1.Probe) builders.StatefulSetBuilder {
+	b.startupProbe = probe
+	return b
+}
+
+// WithServiceName sets the StatefulSet (headless) service name.
+// If never called, Build defaults it to the StatefulSet name.
+func (b *StatefulSetBuilder) WithServiceName(name string) builders.StatefulSetBuilder {
+	b.serviceName = name
+	return b
+}
+
+// WithVolumeClaimTemplate adds a volume claim template.
+// Repeated calls accumulate.
+func (b *StatefulSetBuilder) WithVolumeClaimTemplate(pvc corev1.PersistentVolumeClaim) builders.StatefulSetBuilder {
+	b.volumeClaimTemplates = append(b.volumeClaimTemplates, pvc)
+	return b
+}
+
+// WithUpdateStrategy sets the update strategy.
+// The strategy is copied; the caller's value is not retained.
+func (b *StatefulSetBuilder) WithUpdateStrategy(strategy appsv1.StatefulSetUpdateStrategy) builders.StatefulSetBuilder {
+	b.updateStrategy = &strategy
+	return b
+}
+
+// WithPVCRetentionPolicy sets the PVC retention policy.
+func (b *StatefulSetBuilder) WithPVCRetentionPolicy(policy *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy) builders.StatefulSetBuilder { + b.pvcRetentionPolicy = policy + return b +} + +// WithImagePullPolicy sets the image pull policy. +func (b *StatefulSetBuilder) WithImagePullPolicy(policy corev1.PullPolicy) builders.StatefulSetBuilder { + b.imagePullPolicy = policy + return b +} + +// WithTerminationGracePeriodSeconds sets the termination grace period. +func (b *StatefulSetBuilder) WithTerminationGracePeriodSeconds(seconds int64) builders.StatefulSetBuilder { + b.terminationGracePeriodSeconds = &seconds + return b +} + +// WithPriorityClassName sets the priority class name. +func (b *StatefulSetBuilder) WithPriorityClassName(className string) builders.StatefulSetBuilder { + b.priorityClassName = className + return b +} + +// Build constructs the StatefulSet. +func (b *StatefulSetBuilder) Build() (*appsv1.StatefulSet, error) { + if b.name == "" { + return nil, fmt.Errorf("name is required") + } + if b.image == "" { + return nil, fmt.Errorf("image is required") + } + + // Add default labels + labels := b.buildLabels() + annotations := b.buildAnnotations() + + // Add observability annotations if requested + if b.addObservability && b.observability != nil && b.ctx != nil { + obsAnnotations, err := b.observability.GetObservabilityAnnotations(b.ctx, b.namespace) + if err != nil { + return nil, fmt.Errorf("failed to get observability annotations: %w", err) + } + for k, v := range obsAnnotations { + annotations[k] = v + } + } + + // Add volumes for SDK-managed resources + volumes := b.buildVolumes() + volumeMounts := b.buildVolumeMounts() + + // Build container + container := corev1.Container{ + Name: b.name, + Image: b.image, + ImagePullPolicy: b.imagePullPolicy, + Ports: b.ports, + Command: b.command, + Args: b.args, + Env: b.env, + EnvFrom: b.envFrom, + VolumeMounts: volumeMounts, + Resources: b.resources, + SecurityContext: b.containerSecurityContext, + 
LivenessProbe: b.livenessProbe, + ReadinessProbe: b.readinessProbe, + StartupProbe: b.startupProbe, + } + + // Build pod template + podTemplate := corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + Annotations: annotations, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{container}, + Volumes: volumes, + }, + } + + if b.serviceAccountName != "" { + podTemplate.Spec.ServiceAccountName = b.serviceAccountName + } + + if b.podSecurityContext != nil { + podTemplate.Spec.SecurityContext = b.podSecurityContext + } + + if b.affinity != nil { + podTemplate.Spec.Affinity = b.affinity + } + + if b.terminationGracePeriodSeconds != nil { + podTemplate.Spec.TerminationGracePeriodSeconds = b.terminationGracePeriodSeconds + } + + if b.priorityClassName != "" { + podTemplate.Spec.PriorityClassName = b.priorityClassName + } + + // Build StatefulSet + serviceName := b.serviceName + if serviceName == "" { + serviceName = b.name + } + + statefulSet := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.name, + Namespace: b.namespace, + Labels: labels, + Annotations: annotations, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: b.replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: podTemplate, + ServiceName: serviceName, + VolumeClaimTemplates: b.volumeClaimTemplates, + PodManagementPolicy: appsv1.ParallelPodManagement, + }, + } + + // Set optional fields + if b.updateStrategy != nil { + statefulSet.Spec.UpdateStrategy = *b.updateStrategy + } + + if b.pvcRetentionPolicy != nil { + statefulSet.Spec.PersistentVolumeClaimRetentionPolicy = b.pvcRetentionPolicy + } + + return statefulSet, nil +} + +// buildLabels constructs the label map. 
+func (b *StatefulSetBuilder) buildLabels() map[string]string { + labels := map[string]string{ + "app.kubernetes.io/name": b.name, + "app.kubernetes.io/instance": b.ownerName, + "app.kubernetes.io/managed-by": "splunk-operator", + } + + // Merge user labels + for k, v := range b.labels { + labels[k] = v + } + + return labels +} + +// buildAnnotations constructs the annotation map. +func (b *StatefulSetBuilder) buildAnnotations() map[string]string { + annotations := make(map[string]string) + + // Merge user annotations + for k, v := range b.annotations { + annotations[k] = v + } + + return annotations +} + +// buildVolumes constructs volumes from SDK-managed resources. +func (b *StatefulSetBuilder) buildVolumes() []corev1.Volume { + volumes := make([]corev1.Volume, 0, len(b.volumes)) + + // Add user-provided volumes + volumes = append(volumes, b.volumes...) + + // Add certificate volumes + for i, cert := range b.certificates { + volumeName := fmt.Sprintf("cert-%d", i) + if cert.SecretName != "" { + volumeName = cert.SecretName + } + + volumes = append(volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: cert.SecretName, + }, + }, + }) + } + + // Add secret volumes + for _, sec := range b.secrets { + // Check if this is a CSI secret + if sec.CSI != nil { + // Create CSI volume + volumes = append(volumes, corev1.Volume{ + Name: sec.SecretName, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: sec.CSI.Driver, + ReadOnly: boolPtr(true), + VolumeAttributes: map[string]string{ + "secretProviderClass": sec.CSI.ProviderClass, + }, + }, + }, + }) + } else { + // Create K8s Secret volume + volumes = append(volumes, corev1.Volume{ + Name: sec.SecretName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: sec.SecretName, + }, + }, + }) + } + } + + // Add config map volumes + for _, cm := range b.configMaps { + volumes = 
append(volumes, corev1.Volume{ + Name: cm, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: cm, + }, + }, + }, + }) + } + + return volumes +} + +// buildVolumeMounts constructs volume mounts. +func (b *StatefulSetBuilder) buildVolumeMounts() []corev1.VolumeMount { + mounts := make([]corev1.VolumeMount, 0, len(b.volumeMounts)) + + // Add user-provided mounts + mounts = append(mounts, b.volumeMounts...) + + // Add default mounts for certificates + for i, cert := range b.certificates { + volumeName := fmt.Sprintf("cert-%d", i) + if cert.SecretName != "" { + volumeName = cert.SecretName + } + + // Use custom mount path if specified, otherwise default to /etc/certs/ + mountPath := cert.MountPath + if mountPath == "" { + mountPath = fmt.Sprintf("/etc/certs/%s", volumeName) + } + + mounts = append(mounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: mountPath, + ReadOnly: true, + }) + } + + // Add mounts for secrets - support both CSI and K8s Secrets + for _, sec := range b.secrets { + if sec.CSI != nil { + // CSI secret - use CSI-specific mount path + mounts = append(mounts, corev1.VolumeMount{ + Name: sec.SecretName, + MountPath: sec.CSI.MountPath, + ReadOnly: true, + }) + } + // Note: K8s Secret volumes don't need explicit mounts here + // They are referenced via envFrom in the container spec + } + + return mounts +} + +// int32Ptr returns a pointer to an int32 value. +func int32Ptr(i int32) *int32 { + return &i +} + +// boolPtr returns a pointer to a bool value. +func boolPtr(b bool) *bool { + return &b +} diff --git a/pkg/platform-sdk/services/builders/statefulset_test.go b/pkg/platform-sdk/services/builders/statefulset_test.go new file mode 100644 index 000000000..f17f8e2fe --- /dev/null +++ b/pkg/platform-sdk/services/builders/statefulset_test.go @@ -0,0 +1,676 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. 
+ +package builders + +import ( + "testing" + + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/certificate" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/secret" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +func TestStatefulSetBuilder_Build(t *testing.T) { + tests := []struct { + name string + setupFunc func(*StatefulSetBuilder) + wantErr bool + errMsg string + validate func(*testing.T, *StatefulSetBuilder) + }{ + { + name: "valid basic statefulset", + setupFunc: func(b *StatefulSetBuilder) { + b.WithName("test-sts"). + WithImage("splunk/splunk:9.1.0"). + WithReplicas(3) + }, + wantErr: false, + validate: func(t *testing.T, b *StatefulSetBuilder) { + sts, err := b.Build() + if err != nil { + t.Fatalf("Build() failed: %v", err) + } + if sts.Name != "test-sts" { + t.Errorf("Name = %v, want test-sts", sts.Name) + } + if *sts.Spec.Replicas != 3 { + t.Errorf("Replicas = %v, want 3", *sts.Spec.Replicas) + } + }, + }, + { + name: "missing name", + setupFunc: func(b *StatefulSetBuilder) { + b.WithImage("splunk/splunk:9.1.0") + }, + wantErr: true, + errMsg: "name is required", + }, + { + name: "missing image", + setupFunc: func(b *StatefulSetBuilder) { + b.WithName("test-sts") + }, + wantErr: true, + errMsg: "image is required", + }, + { + name: "with certificate", + setupFunc: func(b *StatefulSetBuilder) { + b.WithName("test-sts"). + WithImage("splunk/splunk:9.1.0"). 
+ WithCertificate(&certificate.Ref{ + SecretName: "my-tls", + Ready: true, + }) + }, + wantErr: false, + validate: func(t *testing.T, b *StatefulSetBuilder) { + sts, err := b.Build() + if err != nil { + t.Fatalf("Build() failed: %v", err) + } + // Check volume created + found := false + for _, vol := range sts.Spec.Template.Spec.Volumes { + if vol.Name == "my-tls" { + found = true + break + } + } + if !found { + t.Error("Certificate volume not created") + } + // Check volume mount created + found = false + for _, mount := range sts.Spec.Template.Spec.Containers[0].VolumeMounts { + if mount.Name == "my-tls" { + found = true + break + } + } + if !found { + t.Error("Certificate volume mount not created") + } + }, + }, + { + name: "with secret", + setupFunc: func(b *StatefulSetBuilder) { + b.WithName("test-sts"). + WithImage("splunk/splunk:9.1.0"). + WithSecret(&secret.Ref{ + SecretName: "my-secret", + Ready: true, + }) + }, + wantErr: false, + validate: func(t *testing.T, b *StatefulSetBuilder) { + sts, err := b.Build() + if err != nil { + t.Fatalf("Build() failed: %v", err) + } + // Check volume created + found := false + for _, vol := range sts.Spec.Template.Spec.Volumes { + if vol.Name == "my-secret" { + found = true + break + } + } + if !found { + t.Error("Secret volume not created") + } + }, + }, + { + name: "with labels and annotations", + setupFunc: func(b *StatefulSetBuilder) { + b.WithName("test-sts"). + WithImage("splunk/splunk:9.1.0"). + WithLabels(map[string]string{ + "custom": "label", + }). 
+ WithAnnotations(map[string]string{ + "custom": "annotation", + }) + }, + wantErr: false, + validate: func(t *testing.T, b *StatefulSetBuilder) { + sts, err := b.Build() + if err != nil { + t.Fatalf("Build() failed: %v", err) + } + if sts.Labels["custom"] != "label" { + t.Error("Custom label not set") + } + if sts.Annotations["custom"] != "annotation" { + t.Error("Custom annotation not set") + } + }, + }, + { + name: "with resources", + setupFunc: func(b *StatefulSetBuilder) { + b.WithName("test-sts"). + WithImage("splunk/splunk:9.1.0"). + WithResources(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + }) + }, + wantErr: false, + validate: func(t *testing.T, b *StatefulSetBuilder) { + sts, err := b.Build() + if err != nil { + t.Fatalf("Build() failed: %v", err) + } + cpu := sts.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceCPU] + if cpu.String() != "500m" { + t.Errorf("CPU request = %v, want 500m", cpu.String()) + } + }, + }, + { + name: "with env vars", + setupFunc: func(b *StatefulSetBuilder) { + b.WithName("test-sts"). + WithImage("splunk/splunk:9.1.0"). + WithEnv(corev1.EnvVar{Name: "FOO", Value: "bar"}). + WithEnv(corev1.EnvVar{Name: "BAZ", Value: "qux"}) + }, + wantErr: false, + validate: func(t *testing.T, b *StatefulSetBuilder) { + sts, err := b.Build() + if err != nil { + t.Fatalf("Build() failed: %v", err) + } + env := sts.Spec.Template.Spec.Containers[0].Env + if len(env) != 2 { + t.Errorf("Env count = %v, want 2", len(env)) + } + }, + }, + { + name: "standard labels applied", + setupFunc: func(b *StatefulSetBuilder) { + b.WithName("test-sts"). 
+ WithImage("splunk/splunk:9.1.0") + }, + wantErr: false, + validate: func(t *testing.T, b *StatefulSetBuilder) { + sts, err := b.Build() + if err != nil { + t.Fatalf("Build() failed: %v", err) + } + if sts.Labels["app.kubernetes.io/name"] != "test-sts" { + t.Error("Standard name label not set") + } + if sts.Labels["app.kubernetes.io/managed-by"] != "splunk-operator" { + t.Error("Standard managed-by label not set") + } + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + builder := NewStatefulSetBuilder("default", "test-owner", nil) + tt.setupFunc(builder) + + if tt.wantErr { + _, err := builder.Build() + if err == nil { + t.Error("Build() expected error but got none") + } else if tt.errMsg != "" && err.Error() != tt.errMsg { + t.Errorf("Build() error = %v, want %v", err.Error(), tt.errMsg) + } + } else if tt.validate != nil { + tt.validate(t, builder) + } + }) + } +} + +func TestStatefulSetBuilder_Defaults(t *testing.T) { + builder := NewStatefulSetBuilder("test-ns", "test-owner", nil) + + // Check defaults + if builder.namespace != "test-ns" { + t.Errorf("namespace = %v, want test-ns", builder.namespace) + } + if builder.ownerName != "test-owner" { + t.Errorf("ownerName = %v, want test-owner", builder.ownerName) + } + if *builder.replicas != 1 { + t.Errorf("default replicas = %v, want 1", *builder.replicas) + } +} + +func TestStatefulSetBuilder_ChainedCalls(t *testing.T) { + builder := NewStatefulSetBuilder("default", "test", nil) + + // Test method chaining + result := builder. + WithName("test"). + WithImage("test:latest"). + WithReplicas(5). 
+ WithLabels(map[string]string{"key": "value"}) + + if result != builder { + t.Error("Methods should return builder for chaining") + } + + sts, err := builder.Build() + if err != nil { + t.Fatalf("Build() failed: %v", err) + } + + if sts.Name != "test" { + t.Errorf("Name not set correctly") + } + if *sts.Spec.Replicas != 5 { + t.Errorf("Replicas not set correctly") + } +} + +// TestStatefulSetBuilder_WithK8sSecret tests K8s Secret volume creation +func TestStatefulSetBuilder_WithK8sSecret(t *testing.T) { + builder := NewStatefulSetBuilder("default", "test-owner", nil) + + // Add a K8s secret (no CSI field) + k8sSecret := &secret.Ref{ + SecretName: "postgres-credentials", + Namespace: "default", + Keys: []string{"username", "password"}, + Ready: true, + Provider: "kubernetes", + } + + builder.WithName("test-sts"). + WithImage("splunk/splunk:9.1.0"). + WithSecret(k8sSecret) + + sts, err := builder.Build() + if err != nil { + t.Fatalf("Build() failed: %v", err) + } + + // Verify K8s Secret volume was created + var secretVolume *corev1.Volume + for i, vol := range sts.Spec.Template.Spec.Volumes { + if vol.Name == "postgres-credentials" { + secretVolume = &sts.Spec.Template.Spec.Volumes[i] + break + } + } + + if secretVolume == nil { + t.Fatal("K8s Secret volume not created") + } + + // Verify it's a Secret volume (not CSI) + if secretVolume.Secret == nil { + t.Error("Volume should have Secret source") + } + + if secretVolume.Secret.SecretName != "postgres-credentials" { + t.Errorf("Secret name = %v, want postgres-credentials", secretVolume.Secret.SecretName) + } + + if secretVolume.CSI != nil { + t.Error("Volume should not have CSI source for K8s secret") + } +} + +// TestStatefulSetBuilder_WithCSISecret tests CSI Secret volume creation +func TestStatefulSetBuilder_WithCSISecret(t *testing.T) { + builder := NewStatefulSetBuilder("default", "test-owner", nil) + + // Add a CSI secret + csiSecret := &secret.Ref{ + SecretName: "vault-secrets", + Namespace: "default", + 
Keys: []string{"api-key", "token"}, + Ready: true, + Provider: "csi-vault", + CSI: &secret.CSIInfo{ + ProviderClass: "standalone-my-splunk-secrets", + Driver: "secrets-store.csi.k8s.io", + MountPath: "/mnt/secrets/vault", + Files: []string{"api-key", "token"}, + }, + } + + builder.WithName("test-sts"). + WithImage("splunk/splunk:9.1.0"). + WithSecret(csiSecret) + + sts, err := builder.Build() + if err != nil { + t.Fatalf("Build() failed: %v", err) + } + + // Verify CSI volume was created + var csiVolume *corev1.Volume + for i, vol := range sts.Spec.Template.Spec.Volumes { + if vol.Name == "vault-secrets" { + csiVolume = &sts.Spec.Template.Spec.Volumes[i] + break + } + } + + if csiVolume == nil { + t.Fatal("CSI volume not created") + } + + // Verify it's a CSI volume (not Secret) + if csiVolume.CSI == nil { + t.Error("Volume should have CSI source") + } + + if csiVolume.CSI.Driver != "secrets-store.csi.k8s.io" { + t.Errorf("CSI driver = %v, want secrets-store.csi.k8s.io", csiVolume.CSI.Driver) + } + + if csiVolume.CSI.VolumeAttributes["secretProviderClass"] != "standalone-my-splunk-secrets" { + t.Errorf("SecretProviderClass = %v, want standalone-my-splunk-secrets", + csiVolume.CSI.VolumeAttributes["secretProviderClass"]) + } + + if csiVolume.Secret != nil { + t.Error("Volume should not have Secret source for CSI secret") + } + + // Verify CSI volume mount was created with correct path + var csiMount *corev1.VolumeMount + for i, mount := range sts.Spec.Template.Spec.Containers[0].VolumeMounts { + if mount.Name == "vault-secrets" { + csiMount = &sts.Spec.Template.Spec.Containers[0].VolumeMounts[i] + break + } + } + + if csiMount == nil { + t.Fatal("CSI volume mount not created") + } + + if csiMount.MountPath != "/mnt/secrets/vault" { + t.Errorf("Mount path = %v, want /mnt/secrets/vault", csiMount.MountPath) + } + + if !csiMount.ReadOnly { + t.Error("CSI mount should be read-only") + } +} + +// TestStatefulSetBuilder_WithHybridSecrets tests both K8s and CSI secrets 
+func TestStatefulSetBuilder_WithHybridSecrets(t *testing.T) { + builder := NewStatefulSetBuilder("default", "test-owner", nil) + + // Add both K8s secret and CSI secret + k8sSecret := &secret.Ref{ + SecretName: "k8s-secret", + Namespace: "default", + Keys: []string{"password"}, + Ready: true, + Provider: "kubernetes", + } + + csiSecret := &secret.Ref{ + SecretName: "csi-secret", + Namespace: "default", + Keys: []string{"api-key"}, + Ready: true, + Provider: "csi-vault", + CSI: &secret.CSIInfo{ + ProviderClass: "vault-secrets", + Driver: "secrets-store.csi.k8s.io", + MountPath: "/mnt/secrets", + Files: []string{"api-key"}, + }, + } + + builder.WithName("test-sts"). + WithImage("splunk/splunk:9.1.0"). + WithSecret(k8sSecret). + WithSecret(csiSecret) + + sts, err := builder.Build() + if err != nil { + t.Fatalf("Build() failed: %v", err) + } + + // Should have 2 volumes + if len(sts.Spec.Template.Spec.Volumes) != 2 { + t.Errorf("Volume count = %v, want 2", len(sts.Spec.Template.Spec.Volumes)) + } + + // Verify K8s Secret volume exists + foundK8s := false + for _, vol := range sts.Spec.Template.Spec.Volumes { + if vol.Name == "k8s-secret" && vol.Secret != nil { + foundK8s = true + break + } + } + if !foundK8s { + t.Error("K8s Secret volume not found") + } + + // Verify CSI volume exists + foundCSI := false + for _, vol := range sts.Spec.Template.Spec.Volumes { + if vol.Name == "csi-secret" && vol.CSI != nil { + foundCSI = true + break + } + } + if !foundCSI { + t.Error("CSI volume not found") + } + + // Should have 1 volume mount (only for CSI) + if len(sts.Spec.Template.Spec.Containers[0].VolumeMounts) != 1 { + t.Errorf("VolumeMount count = %v, want 1 (only CSI)", len(sts.Spec.Template.Spec.Containers[0].VolumeMounts)) + } + + // Verify CSI mount exists + foundCSIMount := false + for _, mount := range sts.Spec.Template.Spec.Containers[0].VolumeMounts { + if mount.Name == "csi-secret" { + foundCSIMount = true + break + } + } + if !foundCSIMount { + t.Error("CSI volume 
mount not found") + } +} + +// TestStatefulSetBuilder_WithCertificate_CustomMountPath tests certificate with custom mount path +func TestStatefulSetBuilder_WithCertificate_CustomMountPath(t *testing.T) { + builder := NewStatefulSetBuilder("default", "test-owner", nil) + + // Add certificate with custom mount path (/mnt/tls for cert-manager compatibility) + cert := &certificate.Ref{ + SecretName: "splunk-tls", + Namespace: "default", + Ready: true, + Provider: "cert-manager", + MountPath: "/mnt/tls", + } + + builder.WithName("test-sts"). + WithImage("splunk/splunk:9.1.0"). + WithCertificate(cert) + + sts, err := builder.Build() + if err != nil { + t.Fatalf("Build() failed: %v", err) + } + + // Verify certificate volume was created + var certVolume *corev1.Volume + for i, vol := range sts.Spec.Template.Spec.Volumes { + if vol.Name == "splunk-tls" { + certVolume = &sts.Spec.Template.Spec.Volumes[i] + break + } + } + + if certVolume == nil { + t.Fatal("Certificate volume not created") + } + + if certVolume.Secret == nil { + t.Error("Volume should have Secret source") + } + + if certVolume.Secret.SecretName != "splunk-tls" { + t.Errorf("Secret name = %v, want splunk-tls", certVolume.Secret.SecretName) + } + + // Verify certificate volume mount with custom path + var certMount *corev1.VolumeMount + for i, mount := range sts.Spec.Template.Spec.Containers[0].VolumeMounts { + if mount.Name == "splunk-tls" { + certMount = &sts.Spec.Template.Spec.Containers[0].VolumeMounts[i] + break + } + } + + if certMount == nil { + t.Fatal("Certificate volume mount not created") + } + + if certMount.MountPath != "/mnt/tls" { + t.Errorf("MountPath = %v, want /mnt/tls", certMount.MountPath) + } + + if !certMount.ReadOnly { + t.Error("Certificate mount should be read-only") + } +} + +// TestStatefulSetBuilder_WithCertificate_DefaultMountPath tests certificate with default mount path +func TestStatefulSetBuilder_WithCertificate_DefaultMountPath(t *testing.T) { + builder := 
NewStatefulSetBuilder("default", "test-owner", nil) + + // Add certificate without custom mount path (should use default) + cert := &certificate.Ref{ + SecretName: "splunk-ca-bundle", + Namespace: "default", + Ready: true, + Provider: "cert-manager", + // MountPath not specified - should default to /etc/certs/splunk-ca-bundle + } + + builder.WithName("test-sts"). + WithImage("splunk/splunk:9.1.0"). + WithCertificate(cert) + + sts, err := builder.Build() + if err != nil { + t.Fatalf("Build() failed: %v", err) + } + + // Verify certificate volume mount uses default path + var certMount *corev1.VolumeMount + for i, mount := range sts.Spec.Template.Spec.Containers[0].VolumeMounts { + if mount.Name == "splunk-ca-bundle" { + certMount = &sts.Spec.Template.Spec.Containers[0].VolumeMounts[i] + break + } + } + + if certMount == nil { + t.Fatal("Certificate volume mount not created") + } + + expectedPath := "/etc/certs/splunk-ca-bundle" + if certMount.MountPath != expectedPath { + t.Errorf("MountPath = %v, want %v", certMount.MountPath, expectedPath) + } +} + +// TestStatefulSetBuilder_WithMultipleCertificates tests multiple certificates with different paths +func TestStatefulSetBuilder_WithMultipleCertificates(t *testing.T) { + builder := NewStatefulSetBuilder("default", "test-owner", nil) + + // Add TLS certificate + tlsCert := &certificate.Ref{ + SecretName: "splunk-tls", + Namespace: "default", + Ready: true, + Provider: "cert-manager", + MountPath: "/mnt/tls", + } + + // Add CA bundle certificate + caCert := &certificate.Ref{ + SecretName: "splunk-ca-bundle", + Namespace: "default", + Ready: true, + Provider: "cert-manager", + MountPath: "/mnt/ca-bundles", + } + + builder.WithName("test-sts"). + WithImage("splunk/splunk:9.1.0"). + WithCertificate(tlsCert). 
+ WithCertificate(caCert) + + sts, err := builder.Build() + if err != nil { + t.Fatalf("Build() failed: %v", err) + } + + // Should have 2 volumes + if len(sts.Spec.Template.Spec.Volumes) != 2 { + t.Errorf("Volume count = %v, want 2", len(sts.Spec.Template.Spec.Volumes)) + } + + // Should have 2 volume mounts + if len(sts.Spec.Template.Spec.Containers[0].VolumeMounts) != 2 { + t.Errorf("VolumeMount count = %v, want 2", len(sts.Spec.Template.Spec.Containers[0].VolumeMounts)) + } + + // Verify TLS certificate mount + var tlsMount *corev1.VolumeMount + for i, mount := range sts.Spec.Template.Spec.Containers[0].VolumeMounts { + if mount.Name == "splunk-tls" { + tlsMount = &sts.Spec.Template.Spec.Containers[0].VolumeMounts[i] + break + } + } + + if tlsMount == nil { + t.Fatal("TLS certificate mount not found") + } + + if tlsMount.MountPath != "/mnt/tls" { + t.Errorf("TLS MountPath = %v, want /mnt/tls", tlsMount.MountPath) + } + + // Verify CA bundle mount + var caMount *corev1.VolumeMount + for i, mount := range sts.Spec.Template.Spec.Containers[0].VolumeMounts { + if mount.Name == "splunk-ca-bundle" { + caMount = &sts.Spec.Template.Spec.Containers[0].VolumeMounts[i] + break + } + } + + if caMount == nil { + t.Fatal("CA bundle mount not found") + } + + if caMount.MountPath != "/mnt/ca-bundles" { + t.Errorf("CA MountPath = %v, want /mnt/ca-bundles", caMount.MountPath) + } +} diff --git a/pkg/platform-sdk/services/certificate/provider_certmanager.go b/pkg/platform-sdk/services/certificate/provider_certmanager.go new file mode 100644 index 000000000..6a0ec7265 --- /dev/null +++ b/pkg/platform-sdk/services/certificate/provider_certmanager.go @@ -0,0 +1,265 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package certificate + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/certificate" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// CertManagerProvider implements certificate provisioning via cert-manager. +// +// The provider: +// - Creates Certificate CRs in the cert-manager.io API group +// - Watches Certificate status for readiness +// - Extracts secret name from Certificate status +// - Returns Ready=true when cert-manager has issued the certificate +type CertManagerProvider struct { + client client.Client + logger logr.Logger +} + +// NewCertManagerProvider creates a new cert-manager provider. +func NewCertManagerProvider(client client.Client, logger logr.Logger) *CertManagerProvider { + return &CertManagerProvider{ + client: client, + logger: logger.WithName("certmanager-provider"), + } +} + +// Name returns the provider name. +func (p *CertManagerProvider) Name() string { + return "cert-manager" +} + +// EnsureCertificate ensures a cert-manager Certificate exists. 
+func (p *CertManagerProvider) EnsureCertificate(ctx context.Context, req Request) (*certificate.Ref, error) { + // Build Certificate CR (unstructured, since we don't import cert-manager types) + cert := &unstructured.Unstructured{} + cert.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "cert-manager.io", + Version: "v1", + Kind: "Certificate", + }) + + // Check if Certificate exists + err := p.client.Get(ctx, types.NamespacedName{ + Name: req.Name, + Namespace: req.Namespace, + }, cert) + + if err != nil { + if apierrors.IsNotFound(err) { + // Create Certificate CR + return p.createCertificate(ctx, req) + } + return nil, fmt.Errorf("failed to get Certificate: %w", err) + } + + // Certificate exists, check its status + return p.checkCertificateStatus(cert, req) +} + +// createCertificate creates a new cert-manager Certificate CR. +func (p *CertManagerProvider) createCertificate(ctx context.Context, req Request) (*certificate.Ref, error) { + p.logger.Info("Creating cert-manager Certificate", + "name", req.Name, + "namespace", req.Namespace, + "dnsNames", req.DNSNames, + ) + + // Build Certificate spec + spec := map[string]interface{}{ + "secretName": req.Name, + "dnsNames": req.DNSNames, + "duration": fmt.Sprintf("%ds", req.Duration), + "renewBefore": fmt.Sprintf("%ds", req.RenewBefore), + "usages": req.Usages, + } + + if len(req.IPAddresses) > 0 { + spec["ipAddresses"] = req.IPAddresses + } + + // Add issuerRef + if req.IssuerRef != nil { + issuerRef := map[string]interface{}{ + "name": req.IssuerRef.Name, + } + if req.IssuerRef.Kind != "" { + issuerRef["kind"] = req.IssuerRef.Kind + } else { + issuerRef["kind"] = "ClusterIssuer" + } + if req.IssuerRef.Group != "" { + issuerRef["group"] = req.IssuerRef.Group + } else { + issuerRef["group"] = "cert-manager.io" + } + spec["issuerRef"] = issuerRef + } + + // Create unstructured Certificate + cert := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "cert-manager.io/v1", + "kind": 
"Certificate", + "metadata": map[string]interface{}{ + "name": req.Name, + "namespace": req.Namespace, + }, + "spec": spec, + }, + } + + err := p.client.Create(ctx, cert) + if err != nil { + return nil, fmt.Errorf("failed to create Certificate: %w", err) + } + + p.logger.Info("Certificate CR created successfully", + "name", req.Name, + "namespace", req.Namespace, + ) + + // Return not ready (cert-manager needs time to issue) + return &certificate.Ref{ + SecretName: req.Name, + Namespace: req.Namespace, + Ready: false, + Provider: "cert-manager", + Error: "Certificate is being issued by cert-manager", + }, nil +} + +// checkCertificateStatus checks the status of a cert-manager Certificate. +func (p *CertManagerProvider) checkCertificateStatus(cert *unstructured.Unstructured, req Request) (*certificate.Ref, error) { + // Extract status + status, found, err := unstructured.NestedMap(cert.Object, "status") + if err != nil || !found { + return &certificate.Ref{ + SecretName: req.Name, + Namespace: req.Namespace, + Ready: false, + Provider: "cert-manager", + Error: "Certificate status not yet available", + }, nil + } + + // Check conditions + conditions, found, err := unstructured.NestedSlice(status, "conditions") + if err != nil || !found { + return &certificate.Ref{ + SecretName: req.Name, + Namespace: req.Namespace, + Ready: false, + Provider: "cert-manager", + Error: "Certificate conditions not yet available", + }, nil + } + + // Look for Ready condition + ready := false + errorMsg := "" + for _, cond := range conditions { + condMap, ok := cond.(map[string]interface{}) + if !ok { + continue + } + + condType, _, _ := unstructured.NestedString(condMap, "type") + if condType != "Ready" { + continue + } + + condStatus, _, _ := unstructured.NestedString(condMap, "status") + ready = (condStatus == string(metav1.ConditionTrue)) + + if !ready { + reason, _, _ := unstructured.NestedString(condMap, "reason") + message, _, _ := unstructured.NestedString(condMap, "message") + 
errorMsg = fmt.Sprintf("%s: %s", reason, message) + } + break + } + + ref := &certificate.Ref{ + SecretName: req.Name, + Namespace: req.Namespace, + Ready: ready, + Provider: "cert-manager", + } + + if !ready { + ref.Error = errorMsg + if errorMsg == "" { + ref.Error = "Certificate not yet ready" + } + } + + // Extract NotBefore and NotAfter from status if available + if ready { + if notBefore, found, _ := unstructured.NestedString(status, "notBefore"); found { + if t, err := time.Parse(time.RFC3339, notBefore); err == nil { + ref.NotBefore = &t + } + } + if notAfter, found, _ := unstructured.NestedString(status, "notAfter"); found { + if t, err := time.Parse(time.RFC3339, notAfter); err == nil { + ref.NotAfter = &t + } + } + if renewalTime, found, _ := unstructured.NestedString(status, "renewalTime"); found { + if t, err := time.Parse(time.RFC3339, renewalTime); err == nil { + ref.RenewalTime = &t + } + } + } + + p.logger.V(1).Info("Certificate status checked", + "name", req.Name, + "namespace", req.Namespace, + "ready", ready, + "error", errorMsg, + ) + + return ref, nil +} + +// GetSecret retrieves the certificate secret. +func (p *CertManagerProvider) GetSecret(ctx context.Context, namespace, name string) (*corev1.Secret, error) { + secret := &corev1.Secret{} + err := p.client.Get(ctx, types.NamespacedName{ + Name: name, + Namespace: namespace, + }, secret) + + if err != nil { + return nil, err + } + + return secret, nil +} diff --git a/pkg/platform-sdk/services/certificate/provider_selfsigned.go b/pkg/platform-sdk/services/certificate/provider_selfsigned.go new file mode 100644 index 000000000..fa11d3b12 --- /dev/null +++ b/pkg/platform-sdk/services/certificate/provider_selfsigned.go @@ -0,0 +1,242 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package certificate + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "net" + "time" + + "github.com/go-logr/logr" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/certificate" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// SelfSignedProvider implements self-signed certificate generation. +// +// The provider: +// - Generates RSA key pairs +// - Creates self-signed X.509 certificates +// - Stores certificates in Kubernetes secrets +// - Returns Ready=true immediately (no external dependency) +type SelfSignedProvider struct { + client client.Client + logger logr.Logger +} + +// NewSelfSignedProvider creates a new self-signed certificate provider. +func NewSelfSignedProvider(client client.Client, logger logr.Logger) *SelfSignedProvider { + return &SelfSignedProvider{ + client: client, + logger: logger.WithName("selfsigned-provider"), + } +} + +// Name returns the provider name. +func (p *SelfSignedProvider) Name() string { + return "self-signed" +} + +// EnsureCertificate ensures a self-signed certificate exists. 
+func (p *SelfSignedProvider) EnsureCertificate(ctx context.Context, req Request) (*certificate.Ref, error) { + // Check if secret already exists + secret := &corev1.Secret{} + err := p.client.Get(ctx, types.NamespacedName{ + Name: req.Name, + Namespace: req.Namespace, + }, secret) + + if err == nil { + // Secret exists, validate it's a TLS secret with required keys + if secret.Type != corev1.SecretTypeTLS { + return nil, fmt.Errorf("existing secret %s is not a TLS secret", req.Name) + } + + if _, hasCert := secret.Data["tls.crt"]; !hasCert { + return nil, fmt.Errorf("existing secret %s is missing tls.crt", req.Name) + } + if _, hasKey := secret.Data["tls.key"]; !hasKey { + return nil, fmt.Errorf("existing secret %s is missing tls.key", req.Name) + } + + // Parse certificate to get expiry info + certPEM := secret.Data["tls.crt"] + block, _ := pem.Decode(certPEM) + if block != nil { + cert, err := x509.ParseCertificate(block.Bytes) + if err == nil { + ref := &certificate.Ref{ + SecretName: req.Name, + Namespace: req.Namespace, + Ready: true, + Provider: "self-signed", + NotBefore: &cert.NotBefore, + NotAfter: &cert.NotAfter, + } + + // Calculate renewal time (30 days before expiry by default) + renewDuration := time.Duration(req.RenewBefore) * time.Second + renewalTime := cert.NotAfter.Add(-renewDuration) + ref.RenewalTime = &renewalTime + + p.logger.V(1).Info("Using existing self-signed certificate", + "name", req.Name, + "namespace", req.Namespace, + "notAfter", cert.NotAfter, + ) + + return ref, nil + } + } + + // Fall through to return basic ref if we couldn't parse + p.logger.V(1).Info("Using existing certificate (couldn't parse for details)", + "name", req.Name, + "namespace", req.Namespace, + ) + + return &certificate.Ref{ + SecretName: req.Name, + Namespace: req.Namespace, + Ready: true, + Provider: "self-signed", + }, nil + } + + if !apierrors.IsNotFound(err) { + return nil, fmt.Errorf("failed to get secret: %w", err) + } + + // Secret doesn't exist, 
generate new certificate + return p.generateCertificate(ctx, req) +} + +// generateCertificate generates a new self-signed certificate. +func (p *SelfSignedProvider) generateCertificate(ctx context.Context, req Request) (*certificate.Ref, error) { + p.logger.Info("Generating self-signed certificate", + "name", req.Name, + "namespace", req.Namespace, + "dnsNames", req.DNSNames, + ) + + // Generate RSA key pair + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, fmt.Errorf("failed to generate RSA key: %w", err) + } + + // Create certificate template + serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128)) + if err != nil { + return nil, fmt.Errorf("failed to generate serial number: %w", err) + } + + notBefore := time.Now() + notAfter := notBefore.Add(time.Duration(req.Duration) * time.Second) + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Splunk Platform SDK"}, + CommonName: req.Name, + }, + NotBefore: notBefore, + NotAfter: notAfter, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + DNSNames: req.DNSNames, + } + + // Add IP addresses if specified + for _, ipStr := range req.IPAddresses { + if ip := net.ParseIP(ipStr); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } + } + + // Create self-signed certificate + certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %w", err) + } + + // Encode certificate to PEM + certPEM := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: certDER, + }) + + // Encode private key to PEM + keyPEM := pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(privateKey), + }) + + // Create 
Kubernetes secret + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: req.Name, + Namespace: req.Namespace, + Labels: map[string]string{ + "app.kubernetes.io/managed-by": "platform-sdk", + "platform.splunk.com/component": "certificate", + }, + }, + Type: corev1.SecretTypeTLS, + Data: map[string][]byte{ + "tls.crt": certPEM, + "tls.key": keyPEM, + "ca.crt": certPEM, // Self-signed, so CA is same as cert + }, + } + + err = p.client.Create(ctx, secret) + if err != nil { + return nil, fmt.Errorf("failed to create secret: %w", err) + } + + p.logger.Info("Self-signed certificate generated successfully", + "name", req.Name, + "namespace", req.Namespace, + "notBefore", notBefore, + "notAfter", notAfter, + ) + + // Calculate renewal time + renewDuration := time.Duration(req.RenewBefore) * time.Second + renewalTime := notAfter.Add(-renewDuration) + + return &certificate.Ref{ + SecretName: req.Name, + Namespace: req.Namespace, + Ready: true, + Provider: "self-signed", + NotBefore: ¬Before, + NotAfter: ¬After, + RenewalTime: &renewalTime, + }, nil +} diff --git a/pkg/platform-sdk/services/certificate/resolver.go b/pkg/platform-sdk/services/certificate/resolver.go new file mode 100644 index 000000000..57a97bf6e --- /dev/null +++ b/pkg/platform-sdk/services/certificate/resolver.go @@ -0,0 +1,141 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package certificate implements certificate resolution with multiple providers. +package certificate + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/certificate" + "github.com/splunk/splunk-operator/pkg/platform-sdk/services/config" + "github.com/splunk/splunk-operator/pkg/platform-sdk/services/interfaces" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Resolver implements certificate resolution with provider selection. +type Resolver struct { + client client.Client + configResolver interfaces.ConfigResolver + config *api.RuntimeConfig + logger logr.Logger + + // Providers + certManagerProvider Provider + selfSignedProvider Provider + + // Provider detection + hasCertManager bool +} + +// Provider is the interface for certificate providers. +type Provider interface { + EnsureCertificate(ctx context.Context, req Request) (*certificate.Ref, error) + Name() string +} + +// Request contains the details for certificate provisioning. +type Request struct { + Name string + Namespace string + DNSNames []string + IPAddresses []string + Duration int64 + RenewBefore int64 + Usages []string + IssuerRef *config.IssuerRef +} + +// NewResolver creates a new certificate resolver. +func NewResolver( + client client.Client, + configResolver interfaces.ConfigResolver, + hasCertManager bool, + cfg *api.RuntimeConfig, + logger logr.Logger, +) *Resolver { + r := &Resolver{ + client: client, + configResolver: configResolver, + config: cfg, + logger: logger.WithName("certificate-resolver"), + hasCertManager: hasCertManager, + } + + r.certManagerProvider = NewCertManagerProvider(client, logger) + r.selfSignedProvider = NewSelfSignedProvider(client, logger) + + return r +} + +// Resolve resolves a certificate for a resource. 
+func (r *Resolver) Resolve(ctx context.Context, binding certificate.Binding) (*certificate.Ref, error) { + if binding.Name == "" { + return nil, fmt.Errorf("certificate name is required") + } + if len(binding.DNSNames) == 0 { + return nil, fmt.Errorf("at least one DNS name is required") + } + + certConfig, err := r.configResolver.ResolveCertificateConfig(ctx, binding.Namespace) + if err != nil { + return nil, fmt.Errorf("failed to resolve certificate configuration: %w", err) + } + + provider, err := r.selectProvider(certConfig) + if err != nil { + return nil, err + } + + req := Request{ + Name: binding.Name, + Namespace: binding.Namespace, + DNSNames: binding.DNSNames, + IPAddresses: binding.IPAddresses, + Duration: certConfig.Duration, + RenewBefore: certConfig.RenewBefore, + Usages: certConfig.Usages, + IssuerRef: certConfig.IssuerRef, + } + + if binding.Duration != nil { + req.Duration = int64(binding.Duration.Seconds()) + } + if binding.RenewBefore != nil { + req.RenewBefore = int64(binding.RenewBefore.Seconds()) + } + + return provider.EnsureCertificate(ctx, req) +} + +// selectProvider selects the appropriate certificate provider. +func (r *Resolver) selectProvider(certConfig *config.ResolvedCertConfig) (Provider, error) { + switch certConfig.Provider { + case "cert-manager": + if !r.hasCertManager { + return r.selfSignedProvider, nil + } + if certConfig.IssuerRef == nil { + return nil, fmt.Errorf("cert-manager provider requires issuerRef configuration") + } + return r.certManagerProvider, nil + case "self-signed": + return r.selfSignedProvider, nil + default: + return r.selfSignedProvider, nil + } +} diff --git a/pkg/platform-sdk/services/config/resolver.go b/pkg/platform-sdk/services/config/resolver.go new file mode 100644 index 000000000..570222098 --- /dev/null +++ b/pkg/platform-sdk/services/config/resolver.go @@ -0,0 +1,491 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package config implements configuration resolution with hierarchical merging. +package config + +import ( + "context" + "fmt" + "sync" + + "github.com/go-logr/logr" + platformv4 "github.com/splunk/splunk-operator/api/platform/v4" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/config" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Resolver implements ConfigResolver interface. +// +// The resolver reads PlatformConfig and TenantConfig CRs and implements +// hierarchical merge logic: +// +// CR spec > TenantConfig > PlatformConfig > Built-in defaults +// +// Configuration is cached per namespace to avoid repeated API calls. +// Watches detect changes and invalidate caches automatically. +type Resolver struct { + client client.Client + config *api.RuntimeConfig + logger logr.Logger + + // Cache for resolved configurations per namespace + cache map[string]*cachedConfig + cacheMu sync.RWMutex + cacheValid bool + + // Watches + watcherStarted bool + watcherMu sync.Mutex +} + +// cachedConfig holds resolved configuration for a namespace. +type cachedConfig struct { + platformConfig *config.PlatformConfig + tenantConfig *config.TenantConfig +} + +// NewResolver creates a new ConfigResolver. 
+func NewResolver(client client.Client, cfg *api.RuntimeConfig, logger logr.Logger) *Resolver { + return &Resolver{ + client: client, + config: cfg, + logger: logger.WithName("config-resolver"), + cache: make(map[string]*cachedConfig), + cacheValid: false, + } +} + +// StartWatches starts watching for configuration changes. +func (r *Resolver) StartWatches(ctx context.Context) error { + r.watcherMu.Lock() + defer r.watcherMu.Unlock() + + if r.watcherStarted { + return nil + } + + r.logger.Info("Starting configuration watches") + + // TODO: Implement actual watches using controller-runtime + // For now, we'll just mark as started and rely on periodic reconciliation + // to pick up changes. In a full implementation, we'd use: + // - Watch PlatformConfig CRs + // - Watch TenantConfig CRs in relevant namespaces + // - Invalidate cache on changes + + r.watcherStarted = true + r.logger.Info("Configuration watches started") + + return nil +} + +// ResolveConfig reads configuration with proper hierarchy. +func (r *Resolver) ResolveConfig(ctx context.Context, key string, namespace string) (interface{}, error) { + // Get cached or fetch configuration + _, err := r.getConfig(ctx, namespace) + if err != nil { + return nil, fmt.Errorf("failed to get configuration: %w", err) + } + + // Parse the key and return the appropriate value + // For now, returning nil as we'll implement specific resolve methods + return nil, fmt.Errorf("generic config resolution not yet implemented, use specific methods") +} + +// GetPlatformConfig retrieves the PlatformConfig for a namespace. +func (r *Resolver) GetPlatformConfig(ctx context.Context, namespace string) (*config.PlatformConfig, error) { + cfg, err := r.getConfig(ctx, namespace) + if err != nil { + return nil, err + } + return cfg.platformConfig, nil +} + +// GetTenantConfig retrieves the TenantConfig for a namespace. 
+func (r *Resolver) GetTenantConfig(ctx context.Context, namespace string) (*config.TenantConfig, error) { + if !r.config.ClusterScoped { + // In namespace-scoped mode, only return config for our namespace + if namespace != r.config.Namespace { + return nil, fmt.Errorf("namespace %s not accessible in namespace-scoped mode", namespace) + } + } + + cfg, err := r.getConfig(ctx, namespace) + if err != nil { + return nil, err + } + return cfg.tenantConfig, nil +} + +// ResolveCertificateConfig resolves certificate configuration with hierarchy. +func (r *Resolver) ResolveCertificateConfig(ctx context.Context, namespace string) (*ResolvedCertConfig, error) { + cfg, err := r.getConfig(ctx, namespace) + if err != nil { + return nil, err + } + + resolved := &ResolvedCertConfig{ + // Layer 1: Built-in defaults + Provider: "self-signed", + Duration: 90 * 24 * 3600, // 90 days in seconds + RenewBefore: 30 * 24 * 3600, // 30 days in seconds + Usages: []string{"digital signature", "key encipherment", "server auth"}, + } + + // Layer 2: PlatformConfig (cluster-wide) + if cfg.platformConfig != nil { + if cfg.platformConfig.Spec.Certificates.Provider != "" { + resolved.Provider = cfg.platformConfig.Spec.Certificates.Provider + } + if cfg.platformConfig.Spec.Certificates.IssuerRef != nil { + resolved.IssuerRef = &IssuerRef{ + Name: cfg.platformConfig.Spec.Certificates.IssuerRef.Name, + Kind: cfg.platformConfig.Spec.Certificates.IssuerRef.Kind, + Group: cfg.platformConfig.Spec.Certificates.IssuerRef.Group, + } + } + if cfg.platformConfig.Spec.Certificates.Duration != nil { + resolved.Duration = int64(cfg.platformConfig.Spec.Certificates.GetCertificateDuration().Seconds()) + } + if cfg.platformConfig.Spec.Certificates.RenewBefore != nil { + resolved.RenewBefore = int64(cfg.platformConfig.Spec.Certificates.GetCertificateRenewBefore().Seconds()) + } + if len(cfg.platformConfig.Spec.Certificates.Usages) > 0 { + resolved.Usages = cfg.platformConfig.Spec.Certificates.Usages + } + } + + // 
Layer 3: TenantConfig (namespace-specific, cluster-scoped mode only) + if r.config.ClusterScoped && cfg.tenantConfig != nil { + if cfg.tenantConfig.Spec.Certificates.Provider != "" { + resolved.Provider = cfg.tenantConfig.Spec.Certificates.Provider + } + if cfg.tenantConfig.Spec.Certificates.IssuerRef != nil { + resolved.IssuerRef = &IssuerRef{ + Name: cfg.tenantConfig.Spec.Certificates.IssuerRef.Name, + Kind: cfg.tenantConfig.Spec.Certificates.IssuerRef.Kind, + Group: cfg.tenantConfig.Spec.Certificates.IssuerRef.Group, + } + } + if cfg.tenantConfig.Spec.Certificates.Duration != nil { + resolved.Duration = int64(cfg.tenantConfig.Spec.Certificates.GetCertificateDuration().Seconds()) + } + if cfg.tenantConfig.Spec.Certificates.RenewBefore != nil { + resolved.RenewBefore = int64(cfg.tenantConfig.Spec.Certificates.GetCertificateRenewBefore().Seconds()) + } + if len(cfg.tenantConfig.Spec.Certificates.Usages) > 0 { + resolved.Usages = cfg.tenantConfig.Spec.Certificates.Usages + } + } + + // Validate issuer ref for cert-manager + if resolved.Provider == "cert-manager" && resolved.IssuerRef == nil { + return nil, fmt.Errorf("cert-manager provider requires issuerRef in configuration") + } + + return resolved, nil +} + +// ResolveSecretConfig resolves secret configuration with hierarchy. 
+func (r *Resolver) ResolveSecretConfig(ctx context.Context, namespace string) (*ResolvedSecretConfig, error) { + cfg, err := r.getConfig(ctx, namespace) + if err != nil { + return nil, err + } + + resolved := &ResolvedSecretConfig{ + // Layer 1: Built-in defaults + Provider: "kubernetes", + GenerateFallback: false, + VersioningEnabled: true, + VersionsToKeep: 3, + // CSI defaults + CSIDriver: "secrets-store.csi.k8s.io", + CSINamingPattern: "${service}-${instance}-secrets", + CSIMountPath: "/mnt/secrets", + } + + // Layer 2: PlatformConfig + if cfg.platformConfig != nil { + if cfg.platformConfig.Spec.Secrets.Provider != "" { + resolved.Provider = cfg.platformConfig.Spec.Secrets.Provider + } + resolved.GenerateFallback = cfg.platformConfig.Spec.Secrets.GenerateFallback + resolved.VersioningEnabled = cfg.platformConfig.Spec.Secrets.VersioningEnabled + if cfg.platformConfig.Spec.Secrets.VersionsToKeep > 0 { + resolved.VersionsToKeep = cfg.platformConfig.Spec.Secrets.GetVersionsToKeep() + } + + // CSI configuration + if cfg.platformConfig.Spec.Secrets.CSI != nil { + csiConfig := cfg.platformConfig.Spec.Secrets.CSI + + // Driver + if csiConfig.Driver != "" { + resolved.CSIDriver = csiConfig.Driver + } + + // Provider + if csiConfig.DefaultProvider != "" { + resolved.CSIProvider = csiConfig.DefaultProvider + } + + // Naming pattern + if csiConfig.Naming != nil && csiConfig.Naming.Pattern != "" { + resolved.CSINamingPattern = csiConfig.Naming.Pattern + } + + // Mount path + if csiConfig.MountPath != "" { + resolved.CSIMountPath = csiConfig.MountPath + } + + // Vault configuration + if csiConfig.Vault != nil { + if csiConfig.Vault.Address != "" { + resolved.CSIVaultAddress = csiConfig.Vault.Address + } + if csiConfig.Vault.Role != "" { + resolved.CSIVaultRole = csiConfig.Vault.Role + } + } + + // AWS configuration + if csiConfig.AWS != nil { + if csiConfig.AWS.Region != "" { + resolved.CSIAWSRegion = csiConfig.AWS.Region + } + } + } + } + + // Layer 3: TenantConfig + if 
r.config.ClusterScoped && cfg.tenantConfig != nil { + if cfg.tenantConfig.Spec.Secrets.Provider != "" { + resolved.Provider = cfg.tenantConfig.Spec.Secrets.Provider + } + resolved.GenerateFallback = cfg.tenantConfig.Spec.Secrets.GenerateFallback + resolved.VersioningEnabled = cfg.tenantConfig.Spec.Secrets.VersioningEnabled + if cfg.tenantConfig.Spec.Secrets.VersionsToKeep > 0 { + resolved.VersionsToKeep = cfg.tenantConfig.Spec.Secrets.GetVersionsToKeep() + } + + // TenantConfig CSI overrides (if needed) + if cfg.tenantConfig.Spec.Secrets.CSI != nil { + csiConfig := cfg.tenantConfig.Spec.Secrets.CSI + + if csiConfig.Driver != "" { + resolved.CSIDriver = csiConfig.Driver + } + if csiConfig.DefaultProvider != "" { + resolved.CSIProvider = csiConfig.DefaultProvider + } + if csiConfig.Naming != nil && csiConfig.Naming.Pattern != "" { + resolved.CSINamingPattern = csiConfig.Naming.Pattern + } + if csiConfig.MountPath != "" { + resolved.CSIMountPath = csiConfig.MountPath + } + } + } + + return resolved, nil +} + +// ResolveObservabilityConfig resolves observability configuration with hierarchy. 
+func (r *Resolver) ResolveObservabilityConfig(ctx context.Context, namespace string) (*ResolvedObservabilityConfig, error) { + cfg, err := r.getConfig(ctx, namespace) + if err != nil { + return nil, err + } + + resolved := &ResolvedObservabilityConfig{ + // Layer 1: Built-in defaults + Enabled: true, + Provider: "opentelemetry", + OTelCollectorMode: "daemonset", + PrometheusPort: 9090, + PrometheusPath: "/metrics", + SamplingRate: 0.1, + } + + // Layer 2: PlatformConfig + if cfg.platformConfig != nil { + resolved.Enabled = cfg.platformConfig.Spec.Observability.Enabled + if cfg.platformConfig.Spec.Observability.Provider != "" { + resolved.Provider = cfg.platformConfig.Spec.Observability.Provider + } + if cfg.platformConfig.Spec.Observability.OTelCollectorMode != "" { + resolved.OTelCollectorMode = cfg.platformConfig.Spec.Observability.OTelCollectorMode + } + if cfg.platformConfig.Spec.Observability.PrometheusAnnotations.Port > 0 { + resolved.PrometheusPort = cfg.platformConfig.Spec.Observability.PrometheusAnnotations.Port + } + if cfg.platformConfig.Spec.Observability.PrometheusAnnotations.Path != "" { + resolved.PrometheusPath = cfg.platformConfig.Spec.Observability.PrometheusAnnotations.Path + } + if cfg.platformConfig.Spec.Observability.SamplingRate > 0 { + resolved.SamplingRate = cfg.platformConfig.Spec.Observability.SamplingRate + } + } + + // Layer 3: TenantConfig + if r.config.ClusterScoped && cfg.tenantConfig != nil { + resolved.Enabled = cfg.tenantConfig.Spec.Observability.Enabled + if cfg.tenantConfig.Spec.Observability.Provider != "" { + resolved.Provider = cfg.tenantConfig.Spec.Observability.Provider + } + if cfg.tenantConfig.Spec.Observability.OTelCollectorMode != "" { + resolved.OTelCollectorMode = cfg.tenantConfig.Spec.Observability.OTelCollectorMode + } + if cfg.tenantConfig.Spec.Observability.PrometheusAnnotations.Port > 0 { + resolved.PrometheusPort = cfg.tenantConfig.Spec.Observability.PrometheusAnnotations.Port + } + if 
cfg.tenantConfig.Spec.Observability.PrometheusAnnotations.Path != "" { + resolved.PrometheusPath = cfg.tenantConfig.Spec.Observability.PrometheusAnnotations.Path + } + if cfg.tenantConfig.Spec.Observability.SamplingRate > 0 { + resolved.SamplingRate = cfg.tenantConfig.Spec.Observability.SamplingRate + } + } + + return resolved, nil +} + +// getConfig retrieves and caches configuration for a namespace. +func (r *Resolver) getConfig(ctx context.Context, namespace string) (*cachedConfig, error) { + // Check cache first + r.cacheMu.RLock() + if r.cacheValid { + if cfg, ok := r.cache[namespace]; ok { + r.cacheMu.RUnlock() + return cfg, nil + } + } + r.cacheMu.RUnlock() + + // Cache miss or invalid, fetch from API + r.cacheMu.Lock() + defer r.cacheMu.Unlock() + + // Double-check after acquiring write lock + if r.cacheValid { + if cfg, ok := r.cache[namespace]; ok { + return cfg, nil + } + } + + cfg := &cachedConfig{} + + // Fetch PlatformConfig (cluster-scoped, single instance) + platformConfig, err := r.fetchPlatformConfig(ctx) + if err != nil && !apierrors.IsNotFound(err) { + return nil, fmt.Errorf("failed to fetch PlatformConfig: %w", err) + } + cfg.platformConfig = platformConfig + + // Fetch TenantConfig (namespace-scoped) + tenantConfig, err := r.fetchTenantConfig(ctx, namespace) + if err != nil && !apierrors.IsNotFound(err) { + return nil, fmt.Errorf("failed to fetch TenantConfig: %w", err) + } + cfg.tenantConfig = tenantConfig + + // Update cache + r.cache[namespace] = cfg + r.cacheValid = true + + r.logger.V(1).Info("Configuration cached", + "namespace", namespace, + "hasPlatformConfig", cfg.platformConfig != nil, + "hasTenantConfig", cfg.tenantConfig != nil, + ) + + return cfg, nil +} + +// fetchPlatformConfig fetches the PlatformConfig from the cluster. 
+func (r *Resolver) fetchPlatformConfig(ctx context.Context) (*config.PlatformConfig, error) { + // By convention, we look for a PlatformConfig named "default" + // Fetch using the API type (platformv4.PlatformConfig) + apiPlatformConfig := &platformv4.PlatformConfig{} + err := r.client.Get(ctx, types.NamespacedName{ + Name: "default", + }, apiPlatformConfig) + + if err != nil { + if apierrors.IsNotFound(err) { + r.logger.V(1).Info("PlatformConfig not found, using defaults") + return nil, nil + } + return nil, err + } + + // Convert from API type to internal SDK type + platformConfig := config.FromAPIType(apiPlatformConfig) + return platformConfig, nil +} + +// fetchTenantConfig fetches the TenantConfig for a namespace. +func (r *Resolver) fetchTenantConfig(ctx context.Context, namespace string) (*config.TenantConfig, error) { + if namespace == "" { + return nil, nil + } + + // By convention, we look for a TenantConfig named "default" in the namespace + // Fetch using the API type (platformv4.TenantConfig) + apiTenantConfig := &platformv4.TenantConfig{} + err := r.client.Get(ctx, types.NamespacedName{ + Name: "default", + Namespace: namespace, + }, apiTenantConfig) + + if err != nil { + if apierrors.IsNotFound(err) { + r.logger.V(1).Info("TenantConfig not found for namespace", "namespace", namespace) + return nil, nil + } + return nil, err + } + + // Convert from API type to internal SDK type + tenantConfig := config.TenantConfigFromAPIType(apiTenantConfig) + return tenantConfig, nil +} + +// InvalidateCache invalidates the configuration cache. +// Call this when configuration changes are detected. +func (r *Resolver) InvalidateCache() { + r.cacheMu.Lock() + defer r.cacheMu.Unlock() + + r.cacheValid = false + r.cache = make(map[string]*cachedConfig) + + r.logger.Info("Configuration cache invalidated") +} + +// InvalidateNamespace invalidates cache for a specific namespace. 
+func (r *Resolver) InvalidateNamespace(namespace string) { + r.cacheMu.Lock() + defer r.cacheMu.Unlock() + + delete(r.cache, namespace) + + r.logger.V(1).Info("Configuration cache invalidated for namespace", "namespace", namespace) +} diff --git a/pkg/platform-sdk/services/config/types.go b/pkg/platform-sdk/services/config/types.go new file mode 100644 index 000000000..febbd7b82 --- /dev/null +++ b/pkg/platform-sdk/services/config/types.go @@ -0,0 +1,93 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +// ResolvedCertConfig is the resolved certificate configuration. +type ResolvedCertConfig struct { + // Provider is the certificate provider ("cert-manager", "self-signed", or "user-provided"). + Provider string + + // NamingPattern is the pattern for certificate secret names. + // Example: "${service}-${instance}-tls" + NamingPattern string + + // UserProvidedNamespace is the namespace where user-provided certificates are located. + // Only used when Provider is "user-provided". + UserProvidedNamespace string + + // IssuerRef for cert-manager (nil if not using cert-manager). + IssuerRef *IssuerRef + + // Duration in seconds. + Duration int64 + + // RenewBefore in seconds. + RenewBefore int64 + + // Usages for the certificate. + Usages []string +} + +// IssuerRef references a cert-manager Issuer or ClusterIssuer. 
+type IssuerRef struct { + Name string + Kind string + Group string +} + +// ResolvedSecretConfig is the resolved secret configuration. +type ResolvedSecretConfig struct { + // Provider is the secret provider. + Provider string + + // GenerateFallback indicates whether to generate fallback secrets. + GenerateFallback bool + + // VersioningEnabled indicates if versioning is enabled for Splunk secrets. + VersioningEnabled bool + + // VersionsToKeep is how many versions to keep. + VersionsToKeep int + + // CSI configuration (only set when Provider is "csi"). + CSIDriver string // CSI driver name + CSIProvider string // CSI provider (vault, aws, azure, gcp) + CSINamingPattern string // Pattern for SecretProviderClass names + CSIMountPath string // Default mount path for CSI secrets + CSIVaultAddress string // Vault address (if using Vault) + CSIVaultRole string // Vault role (if using Vault) + CSIAWSRegion string // AWS region (if using AWS) +} + +// ResolvedObservabilityConfig is the resolved observability configuration. +type ResolvedObservabilityConfig struct { + // Enabled indicates if observability is enabled. + Enabled bool + + // Provider is the observability provider. + Provider string + + // OTelCollectorMode is the deployment mode for OTel Collector. + OTelCollectorMode string + + // PrometheusPort for metrics. + PrometheusPort int + + // PrometheusPath for metrics. + PrometheusPath string + + // SamplingRate for traces. + SamplingRate float64 +} diff --git a/pkg/platform-sdk/services/discovery/service.go b/pkg/platform-sdk/services/discovery/service.go new file mode 100644 index 000000000..ba16a3f7f --- /dev/null +++ b/pkg/platform-sdk/services/discovery/service.go @@ -0,0 +1,253 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package discovery + +import ( + "context" + "fmt" + "strconv" + + "github.com/go-logr/logr" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/discovery" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Service implements service discovery for Kubernetes and Splunk resources. +type Service struct { + client client.Client + config *api.RuntimeConfig + logger logr.Logger +} + +// NewService creates a new discovery service. +func NewService(client client.Client, config *api.RuntimeConfig, logger logr.Logger) *Service { + return &Service{ + client: client, + config: config, + logger: logger.WithName("discovery"), + } +} + +// DiscoverSplunk finds Splunk instances based on the provided selector. 
+func (s *Service) DiscoverSplunk(ctx context.Context, selector discovery.SplunkSelector) ([]discovery.SplunkEndpoint, error) { + logger := s.logger.WithValues("type", selector.Type, "namespace", selector.Namespace) + logger.V(1).Info("discovering Splunk instances") + + // Build label selector for Splunk services + labelSelector := s.buildSplunkLabelSelector(selector) + + // List services matching the selector + serviceList := &corev1.ServiceList{} + listOpts := []client.ListOption{ + client.MatchingLabelsSelector{Selector: labelSelector}, + } + + // Add namespace constraint if specified + if selector.Namespace != "" { + listOpts = append(listOpts, client.InNamespace(selector.Namespace)) + } else if !s.config.ClusterScoped { + // If not cluster-scoped, restrict to configured namespace + listOpts = append(listOpts, client.InNamespace(s.config.Namespace)) + } + + if err := s.client.List(ctx, serviceList, listOpts...); err != nil { + return nil, fmt.Errorf("failed to list services: %w", err) + } + + logger.V(1).Info("found services", "count", len(serviceList.Items)) + + // Convert services to Splunk endpoints + endpoints := make([]discovery.SplunkEndpoint, 0, len(serviceList.Items)) + for _, svc := range serviceList.Items { + endpoint, err := s.serviceToSplunkEndpoint(ctx, svc, selector) + if err != nil { + logger.Error(err, "failed to convert service to endpoint", "service", svc.Name) + continue + } + if endpoint != nil { + endpoints = append(endpoints, *endpoint) + } + } + + logger.V(1).Info("discovered Splunk endpoints", "count", len(endpoints)) + return endpoints, nil +} + +// Discover finds generic Kubernetes services. 
+func (s *Service) Discover(ctx context.Context, selector discovery.Selector) ([]discovery.Endpoint, error) { + logger := s.logger.WithValues("namespace", selector.Namespace) + logger.V(1).Info("discovering services") + + // Build label selector + labelSelector, err := labels.ValidatedSelectorFromSet(selector.Labels) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %w", err) + } + + // List services + serviceList := &corev1.ServiceList{} + listOpts := []client.ListOption{ + client.MatchingLabelsSelector{Selector: labelSelector}, + } + + if selector.Namespace != "" { + listOpts = append(listOpts, client.InNamespace(selector.Namespace)) + } else if !s.config.ClusterScoped { + listOpts = append(listOpts, client.InNamespace(s.config.Namespace)) + } + + if err := s.client.List(ctx, serviceList, listOpts...); err != nil { + return nil, fmt.Errorf("failed to list services: %w", err) + } + + logger.V(1).Info("found services", "count", len(serviceList.Items)) + + // Convert services to endpoints + endpoints := make([]discovery.Endpoint, 0, len(serviceList.Items)) + for _, svc := range serviceList.Items { + endpoint := s.serviceToEndpoint(svc, selector) + if endpoint != nil { + endpoints = append(endpoints, *endpoint) + } + } + + logger.V(1).Info("discovered endpoints", "count", len(endpoints)) + return endpoints, nil +} + +// buildSplunkLabelSelector builds a label selector for Splunk services. 
+func (s *Service) buildSplunkLabelSelector(selector discovery.SplunkSelector) labels.Selector { + // Base labels for Splunk resources managed by the operator + baseLabels := map[string]string{ + "app.kubernetes.io/managed-by": "splunk-operator", + "app.kubernetes.io/component": string(selector.Type), + } + + // Merge with additional labels + for k, v := range selector.Labels { + baseLabels[k] = v + } + + labelSelector, _ := labels.ValidatedSelectorFromSet(baseLabels) + return labelSelector +} + +// serviceToSplunkEndpoint converts a Kubernetes service to a Splunk endpoint. +func (s *Service) serviceToSplunkEndpoint(ctx context.Context, svc corev1.Service, selector discovery.SplunkSelector) (*discovery.SplunkEndpoint, error) { + // Find management port (default 8089) + var mgmtPort int32 = 8089 + + for _, port := range svc.Spec.Ports { + if port.Name == "mgmt" || port.Name == "management" { + mgmtPort = port.Port + break + } + } + + // Check for TLS configuration in annotations + scheme := "https" + if tlsDisabled, ok := svc.Annotations["splunk.com/tls-disabled"]; ok { + if disabled, _ := strconv.ParseBool(tlsDisabled); disabled { + scheme = "http" + } + } + + // Build endpoint URL + host := fmt.Sprintf("%s.%s.svc.cluster.local", svc.Name, svc.Namespace) + url := fmt.Sprintf("%s://%s:%d", scheme, host, mgmtPort) + + // Check if endpoint is ready + healthy := s.isServiceReady(ctx, svc) + + var health *discovery.HealthStatus + if healthy { + health = &discovery.HealthStatus{ + Healthy: true, + } + } + + endpoint := discovery.SplunkEndpoint{ + Name: svc.Name, + Type: selector.Type, + URL: url, + IsExternal: false, + Namespace: svc.Namespace, + Health: health, + Labels: svc.Labels, + } + + return &endpoint, nil +} + +// serviceToEndpoint converts a Kubernetes service to a generic endpoint. 
+func (s *Service) serviceToEndpoint(svc corev1.Service, selector discovery.Selector) *discovery.Endpoint { + // Find the primary port + var port int32 + if len(svc.Spec.Ports) > 0 { + // Use the first port by default + port = svc.Spec.Ports[0].Port + } + + scheme := "http" + if port == 443 || svc.Annotations["service.alpha.kubernetes.io/scheme"] == "https" { + scheme = "https" + } + + host := fmt.Sprintf("%s.%s.svc.cluster.local", svc.Name, svc.Namespace) + url := fmt.Sprintf("%s://%s:%d", scheme, host, port) + + return &discovery.Endpoint{ + Name: svc.Name, + URL: url, + Namespace: svc.Namespace, + Labels: svc.Labels, + Health: &discovery.HealthStatus{ + Healthy: true, // Generic services are assumed ready + }, + } +} + +// isServiceReady checks if a service has ready endpoints. +func (s *Service) isServiceReady(ctx context.Context, svc corev1.Service) bool { + // For headless services, check if there are any pods + if svc.Spec.ClusterIP == "None" { + podList := &corev1.PodList{} + listOpts := []client.ListOption{ + client.InNamespace(svc.Namespace), + client.MatchingLabels(svc.Spec.Selector), + } + + if err := s.client.List(ctx, podList, listOpts...); err != nil { + s.logger.Error(err, "failed to list pods for service", "service", svc.Name) + return false + } + + // Check if at least one pod is ready + for _, pod := range podList.Items { + for _, condition := range pod.Status.Conditions { + if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue { + return true + } + } + } + return false + } + + // For regular services, assume ready if ClusterIP is assigned + return svc.Spec.ClusterIP != "" +} diff --git a/pkg/platform-sdk/services/interfaces.go b/pkg/platform-sdk/services/interfaces.go new file mode 100644 index 000000000..61b378f17 --- /dev/null +++ b/pkg/platform-sdk/services/interfaces.go @@ -0,0 +1,86 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package services provides the service layer implementations. +package services + +import ( + "github.com/go-logr/logr" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/builders" + builderspkg "github.com/splunk/splunk-operator/pkg/platform-sdk/services/builders" + certificatepkg "github.com/splunk/splunk-operator/pkg/platform-sdk/services/certificate" + configpkg "github.com/splunk/splunk-operator/pkg/platform-sdk/services/config" + discoverypkg "github.com/splunk/splunk-operator/pkg/platform-sdk/services/discovery" + "github.com/splunk/splunk-operator/pkg/platform-sdk/services/interfaces" + observabilitypkg "github.com/splunk/splunk-operator/pkg/platform-sdk/services/observability" + secretpkg "github.com/splunk/splunk-operator/pkg/platform-sdk/services/secret" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Re-export interfaces for convenience +type ConfigResolver = interfaces.ConfigResolver +type CertificateResolver = interfaces.CertificateResolver +type SecretResolver = interfaces.SecretResolver +type DiscoveryService = interfaces.DiscoveryService +type ObservabilityService = interfaces.ObservabilityService +type BackupService = interfaces.BackupService + +// Factory functions for services + +func NewConfigResolver(client client.Client, config *api.RuntimeConfig, logger logr.Logger) ConfigResolver { + return 
configpkg.NewResolver(client, config, logger) +} + +func NewCertificateResolver(client client.Client, configResolver ConfigResolver, hasCertManager bool, config *api.RuntimeConfig, logger logr.Logger) CertificateResolver { + return certificatepkg.NewResolver(client, configResolver, hasCertManager, config, logger) +} + +func NewSecretResolver(client client.Client, configResolver ConfigResolver, config *api.RuntimeConfig, logger logr.Logger) SecretResolver { + return secretpkg.NewResolver(client, configResolver, config, logger) +} + +func NewDiscoveryService(client client.Client, config *api.RuntimeConfig, logger logr.Logger) DiscoveryService { + return discoverypkg.NewService(client, config, logger) +} + +func NewObservabilityService(client client.Client, configResolver ConfigResolver, config *api.RuntimeConfig, logger logr.Logger) ObservabilityService { + return observabilitypkg.NewService(client, configResolver, config, logger) +} + +func NewBackupService(client client.Client, config *api.RuntimeConfig, logger logr.Logger) BackupService { + return &backupService{client: client, config: config, logger: logger} +} + +// Factory functions for builders + +func NewStatefulSetBuilder(namespace, ownerName string, observability ObservabilityService) builders.StatefulSetBuilder { + return builderspkg.NewStatefulSetBuilder(namespace, ownerName, observability) +} + +func NewServiceBuilder(namespace, ownerName string) builders.ServiceBuilder { + return builderspkg.NewServiceBuilder(namespace, ownerName) +} + +func NewConfigMapBuilder(namespace, ownerName string) builders.ConfigMapBuilder { + return builderspkg.NewConfigMapBuilder(namespace, ownerName) +} + +func NewPodBuilder(namespace, ownerName string) builders.PodBuilder { + return &podBuilder{namespace: namespace, ownerName: ownerName} +} + +func NewDeploymentBuilder(namespace, ownerName string, observability ObservabilityService) builders.DeploymentBuilder { + return builderspkg.NewDeploymentBuilder(namespace, ownerName, 
observability) +} diff --git a/pkg/platform-sdk/services/interfaces/interfaces.go b/pkg/platform-sdk/services/interfaces/interfaces.go new file mode 100644 index 000000000..c11b3c877 --- /dev/null +++ b/pkg/platform-sdk/services/interfaces/interfaces.go @@ -0,0 +1,79 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package interfaces defines the service interfaces to avoid import cycles. +package interfaces + +import ( + "context" + + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/certificate" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/discovery" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/secret" + "github.com/splunk/splunk-operator/pkg/platform-sdk/services/config" +) + +// ConfigResolver reads platform and tenant configuration with proper hierarchy. +type ConfigResolver interface { + // StartWatches starts watching for configuration changes. + StartWatches(ctx context.Context) error + + // ResolveConfig reads configuration with proper hierarchy. + // Hierarchy: CR spec > TenantConfig > PlatformConfig > Built-in defaults + ResolveConfig(ctx context.Context, key string, namespace string) (interface{}, error) + + // ResolveCertificateConfig resolves certificate configuration. + ResolveCertificateConfig(ctx context.Context, namespace string) (*config.ResolvedCertConfig, error) + + // ResolveSecretConfig resolves secret configuration. 
+ ResolveSecretConfig(ctx context.Context, namespace string) (*config.ResolvedSecretConfig, error) + + // ResolveObservabilityConfig resolves observability configuration. + ResolveObservabilityConfig(ctx context.Context, namespace string) (*config.ResolvedObservabilityConfig, error) +} + +// CertificateResolver orchestrates certificate provisioning. +type CertificateResolver interface { + // Resolve resolves a certificate for a resource. + Resolve(ctx context.Context, binding certificate.Binding) (*certificate.Ref, error) +} + +// SecretResolver validates that secrets exist and manages versioning. +type SecretResolver interface { + // Resolve validates that a secret exists. + Resolve(ctx context.Context, binding secret.Binding) (*secret.Ref, error) +} + +// DiscoveryService finds services in Kubernetes and external systems. +type DiscoveryService interface { + // DiscoverSplunk finds Splunk instances. + DiscoverSplunk(ctx context.Context, selector discovery.SplunkSelector) ([]discovery.SplunkEndpoint, error) + + // Discover finds generic Kubernetes services. + Discover(ctx context.Context, selector discovery.Selector) ([]discovery.Endpoint, error) +} + +// ObservabilityService adds monitoring capabilities. +type ObservabilityService interface { + // ShouldAddObservability checks if observability should be added. + ShouldAddObservability(ctx context.Context, namespace string) (bool, error) + + // GetObservabilityAnnotations returns the observability annotations. + GetObservabilityAnnotations(ctx context.Context, namespace string) (map[string]string, error) +} + +// BackupService manages backup and restore operations. 
+type BackupService interface { + // TODO: Define backup service interface +} diff --git a/pkg/platform-sdk/services/observability/service.go b/pkg/platform-sdk/services/observability/service.go new file mode 100644 index 000000000..a388ec616 --- /dev/null +++ b/pkg/platform-sdk/services/observability/service.go @@ -0,0 +1,113 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package observability + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" + configpkg "github.com/splunk/splunk-operator/pkg/platform-sdk/services/config" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ConfigResolver interface to avoid import cycle. +type ConfigResolver interface { + ResolveObservabilityConfig(ctx context.Context, namespace string) (*configpkg.ResolvedObservabilityConfig, error) +} + +// Service implements observability functionality for Splunk resources. +type Service struct { + client client.Client + config *api.RuntimeConfig + configResolver ConfigResolver + logger logr.Logger +} + +// NewService creates a new observability service. 
+func NewService(client client.Client, configResolver ConfigResolver, config *api.RuntimeConfig, logger logr.Logger) *Service { + return &Service{ + client: client, + config: config, + configResolver: configResolver, + logger: logger.WithName("observability"), + } +} + +// ShouldAddObservability checks if observability should be added for a namespace. +func (s *Service) ShouldAddObservability(ctx context.Context, namespace string) (bool, error) { + logger := s.logger.WithValues("namespace", namespace) + logger.V(1).Info("checking if observability should be added") + + // Get observability configuration + obsConfig, err := s.configResolver.ResolveObservabilityConfig(ctx, namespace) + if err != nil { + return false, fmt.Errorf("failed to resolve observability config: %w", err) + } + + // Check if observability is enabled + enabled := obsConfig.Enabled + + logger.V(1).Info("observability check complete", "enabled", enabled) + return enabled, nil +} + +// GetObservabilityAnnotations returns the annotations to add for observability. 
+func (s *Service) GetObservabilityAnnotations(ctx context.Context, namespace string) (map[string]string, error) { + logger := s.logger.WithValues("namespace", namespace) + logger.V(1).Info("getting observability annotations") + + // Get observability configuration + obsConfig, err := s.configResolver.ResolveObservabilityConfig(ctx, namespace) + if err != nil { + return nil, fmt.Errorf("failed to resolve observability config: %w", err) + } + + // If not enabled, return empty annotations + if !obsConfig.Enabled { + return map[string]string{}, nil + } + + annotations := make(map[string]string) + + // Add Prometheus annotations for metrics + if obsConfig.PrometheusPort > 0 { + annotations["prometheus.io/scrape"] = "true" + annotations["prometheus.io/port"] = fmt.Sprintf("%d", obsConfig.PrometheusPort) + + if obsConfig.PrometheusPath != "" { + annotations["prometheus.io/path"] = obsConfig.PrometheusPath + } else { + annotations["prometheus.io/path"] = "/metrics" + } + } + + // Add OpenTelemetry annotations if OTel Collector is enabled + if obsConfig.Provider == "otel" || obsConfig.OTelCollectorMode != "" { + // Inject sidecar or daemonset mode + if obsConfig.OTelCollectorMode == "sidecar" { + annotations["sidecar.opentelemetry.io/inject"] = "true" + } + + // Add sampling rate if specified + if obsConfig.SamplingRate > 0 { + annotations["opentelemetry.io/sampling-rate"] = fmt.Sprintf("%.2f", obsConfig.SamplingRate) + } + } + + logger.V(1).Info("observability annotations generated", "count", len(annotations)) + return annotations, nil +} diff --git a/pkg/platform-sdk/services/secret/resolver.go b/pkg/platform-sdk/services/secret/resolver.go new file mode 100644 index 000000000..ca20c7d83 --- /dev/null +++ b/pkg/platform-sdk/services/secret/resolver.go @@ -0,0 +1,509 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package secret implements secret validation and versioned secret management. +package secret + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/go-logr/logr" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/secret" + "github.com/splunk/splunk-operator/pkg/platform-sdk/services/config" + "github.com/splunk/splunk-operator/pkg/platform-sdk/services/interfaces" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Resolver implements secret validation and versioned secret management. +type Resolver struct { + client client.Client + configResolver interfaces.ConfigResolver + config *api.RuntimeConfig + logger logr.Logger +} + +// NewResolver creates a new secret resolver. +func NewResolver( + client client.Client, + configResolver interfaces.ConfigResolver, + cfg *api.RuntimeConfig, + logger logr.Logger, +) *Resolver { + return &Resolver{ + client: client, + configResolver: configResolver, + config: cfg, + logger: logger.WithName("secret-resolver"), + } +} + +// Resolve validates that a secret exists and has required keys. 
+func (r *Resolver) Resolve(ctx context.Context, binding secret.Binding) (*secret.Ref, error) { + if binding.Name == "" && binding.Service == "" { + return nil, fmt.Errorf("secret name or service/instance is required") + } + if len(binding.Keys) == 0 { + return nil, fmt.Errorf("at least one key is required") + } + + secretConfig, err := r.configResolver.ResolveSecretConfig(ctx, binding.Namespace) + if err != nil { + return nil, fmt.Errorf("failed to resolve secret configuration: %w", err) + } + + // Handle CSI secrets if CSI is configured in binding or provider is "csi" + if binding.CSI != nil || secretConfig.Provider == "csi" { + return r.resolveCSISecret(ctx, binding, secretConfig) + } + + // Handle versioned Splunk secrets + if binding.Type == secret.SecretTypeSplunk { + return r.resolveVersionedSecret(ctx, binding, secretConfig) + } + + // Handle generic secrets + return r.resolveGenericSecret(ctx, binding, secretConfig) +} + +// resolveCSISecret resolves CSI-based secrets via SecretProviderClass. 
+func (r *Resolver) resolveCSISecret(ctx context.Context, binding secret.Binding, cfg interface{}) (*secret.Ref, error) { + // Type assertion to get ResolvedSecretConfig + secretConfig, ok := cfg.(*config.ResolvedSecretConfig) + if !ok { + return nil, fmt.Errorf("invalid secret config type for CSI resolution") + } + + // Determine SecretProviderClass name + providerClassName := "" + if binding.CSI != nil && binding.CSI.ProviderClass != "" { + // Explicitly provided + providerClassName = binding.CSI.ProviderClass + } else if binding.Service != "" && binding.Instance != "" { + // Apply naming pattern + var err error + providerClassName, err = r.applyCSINamingPattern(secretConfig.CSINamingPattern, binding) + if err != nil { + return nil, fmt.Errorf("failed to apply CSI naming pattern: %w", err) + } + } else { + return nil, fmt.Errorf("CSI secret requires either explicit ProviderClass or Service+Instance for pattern resolution") + } + + // Check if SecretProviderClass exists + providerClass := &unstructured.Unstructured{} + providerClass.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "secrets-store.csi.x-k8s.io", + Version: "v1", + Kind: "SecretProviderClass", + }) + + err := r.client.Get(ctx, types.NamespacedName{ + Name: providerClassName, + Namespace: binding.Namespace, + }, providerClass) + + if err != nil { + if apierrors.IsNotFound(err) { + return &secret.Ref{ + SecretName: binding.Name, + Namespace: binding.Namespace, + Ready: false, + Provider: fmt.Sprintf("csi-%s", secretConfig.CSIProvider), + Error: fmt.Sprintf("SecretProviderClass %s not found in namespace %s", providerClassName, binding.Namespace), + }, nil + } + return nil, fmt.Errorf("failed to get SecretProviderClass: %w", err) + } + + // Determine mount path + mountPath := secretConfig.CSIMountPath + if binding.CSI != nil && binding.CSI.MountPath != "" { + mountPath = binding.CSI.MountPath + } + if mountPath == "" { + mountPath = "/mnt/secrets" + } + + // Determine CSI provider + csiProvider := 
secretConfig.CSIProvider + if binding.CSI != nil && binding.CSI.Provider != "" { + csiProvider = binding.CSI.Provider + } + if csiProvider == "" { + csiProvider = "vault" // Default + } + + // Determine CSI driver + csiDriver := secretConfig.CSIDriver + if csiDriver == "" { + csiDriver = "secrets-store.csi.k8s.io" + } + + // SecretProviderClass exists, return CSI reference + return &secret.Ref{ + SecretName: binding.Name, + Namespace: binding.Namespace, + Keys: binding.Keys, + Ready: true, + Provider: fmt.Sprintf("csi-%s", csiProvider), + CSI: &secret.CSIInfo{ + ProviderClass: providerClassName, + Driver: csiDriver, + MountPath: mountPath, + Files: binding.Keys, // Each key becomes a file in the mount + }, + }, nil +} + +// applyCSINamingPattern applies variable substitution to the CSI naming pattern. +func (r *Resolver) applyCSINamingPattern(pattern string, binding secret.Binding) (string, error) { + if pattern == "" { + pattern = "${service}-${instance}-secrets" + } + + result := pattern + result = strings.ReplaceAll(result, "${namespace}", binding.Namespace) + result = strings.ReplaceAll(result, "${service}", binding.Service) + result = strings.ReplaceAll(result, "${instance}", binding.Instance) + + // Check if all variables were replaced + if strings.Contains(result, "${") { + return "", fmt.Errorf("unresolved variables in pattern: %s", result) + } + + return result, nil +} + +// resolveGenericSecret validates a generic secret exists. 
+func (r *Resolver) resolveGenericSecret(ctx context.Context, binding secret.Binding, cfg interface{}) (*secret.Ref, error) { + k8sSecret := &corev1.Secret{} + err := r.client.Get(ctx, types.NamespacedName{ + Name: binding.Name, + Namespace: binding.Namespace, + }, k8sSecret) + + if err != nil { + if apierrors.IsNotFound(err) { + return &secret.Ref{ + SecretName: binding.Name, + Namespace: binding.Namespace, + Ready: false, + Error: fmt.Sprintf("secret %s not found", binding.Name), + }, nil + } + return nil, fmt.Errorf("failed to get secret: %w", err) + } + + // Validate required keys + missingKeys := []string{} + existingKeys := []string{} + for _, key := range binding.Keys { + if _, ok := k8sSecret.Data[key]; ok { + existingKeys = append(existingKeys, key) + } else { + missingKeys = append(missingKeys, key) + } + } + + if len(missingKeys) > 0 { + return &secret.Ref{ + SecretName: binding.Name, + Namespace: binding.Namespace, + Keys: existingKeys, + Ready: false, + Error: fmt.Sprintf("missing required keys: %v", missingKeys), + Provider: r.detectProvider(k8sSecret), + }, nil + } + + return &secret.Ref{ + SecretName: binding.Name, + Namespace: binding.Namespace, + Keys: existingKeys, + Ready: true, + Provider: r.detectProvider(k8sSecret), + }, nil +} + +// resolveVersionedSecret manages versioned Splunk secrets. 
+func (r *Resolver) resolveVersionedSecret(ctx context.Context, binding secret.Binding, cfg interface{}) (*secret.Ref, error) { + sourceSecretName := fmt.Sprintf("splunk-%s-secret", binding.Namespace) + + // Check if source secret exists + sourceSecret := &corev1.Secret{} + err := r.client.Get(ctx, types.NamespacedName{ + Name: sourceSecretName, + Namespace: binding.Namespace, + }, sourceSecret) + + if err != nil { + if apierrors.IsNotFound(err) { + return &secret.Ref{ + SecretName: binding.Name, + Namespace: binding.Namespace, + Ready: false, + Error: fmt.Sprintf("source secret %s not found", sourceSecretName), + }, nil + } + return nil, fmt.Errorf("failed to get source secret: %w", err) + } + + // Validate source secret has required keys + for _, key := range binding.Keys { + if _, ok := sourceSecret.Data[key]; !ok { + return &secret.Ref{ + SecretName: binding.Name, + Namespace: binding.Namespace, + Ready: false, + Error: fmt.Sprintf("source secret missing key: %s", key), + SourceSecretName: sourceSecretName, + }, nil + } + } + + // Find or create versioned secret + latestVersion, err := r.findLatestVersion(ctx, binding.Name, binding.Namespace) + if err != nil { + return nil, fmt.Errorf("failed to find latest version: %w", err) + } + + if latestVersion == 0 { + // Create v1 + return r.createVersionedSecret(ctx, binding, sourceSecret, 1) + } + + // Check if source has changed + versionedName := fmt.Sprintf("%s-v%d", binding.Name, latestVersion) + versionedSecret := &corev1.Secret{} + err = r.client.Get(ctx, types.NamespacedName{ + Name: versionedName, + Namespace: binding.Namespace, + }, versionedSecret) + + if err != nil { + if apierrors.IsNotFound(err) { + // Versioned secret deleted, recreate + return r.createVersionedSecret(ctx, binding, sourceSecret, latestVersion) + } + return nil, fmt.Errorf("failed to get versioned secret: %w", err) + } + + // Check if content has changed + if r.secretsEqual(sourceSecret, versionedSecret) { + // No change, return existing 
version + version := int(latestVersion) + return &secret.Ref{ + SecretName: versionedName, + Namespace: binding.Namespace, + Keys: binding.Keys, + Ready: true, + Provider: r.detectProvider(sourceSecret), + Version: &version, + SourceSecretName: sourceSecretName, + }, nil + } + + // Content changed, create new version + return r.createVersionedSecret(ctx, binding, sourceSecret, latestVersion+1) +} + +// findLatestVersion finds the latest version number for a secret. +func (r *Resolver) findLatestVersion(ctx context.Context, baseName, namespace string) (int, error) { + secretList := &corev1.SecretList{} + err := r.client.List(ctx, secretList, client.InNamespace(namespace)) + if err != nil { + return 0, err + } + + maxVersion := 0 + prefix := baseName + "-v" + for _, s := range secretList.Items { + if strings.HasPrefix(s.Name, prefix) { + versionStr := strings.TrimPrefix(s.Name, prefix) + if version, err := strconv.Atoi(versionStr); err == nil { + if version > maxVersion { + maxVersion = version + } + } + } + } + + return maxVersion, nil +} + +// createVersionedSecret creates a new versioned secret. 
+func (r *Resolver) createVersionedSecret(ctx context.Context, binding secret.Binding, sourceSecret *corev1.Secret, version int) (*secret.Ref, error) { + versionedName := fmt.Sprintf("%s-v%d", binding.Name, version) + + r.logger.Info("Creating versioned secret", + "name", versionedName, + "namespace", binding.Namespace, + "version", version, + ) + + // Copy data from source secret + data := make(map[string][]byte) + for k, v := range sourceSecret.Data { + data[k] = v + } + + versionedSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: versionedName, + Namespace: binding.Namespace, + Labels: map[string]string{ + "app.kubernetes.io/managed-by": "platform-sdk", + "platform.splunk.com/component": "secret", + "platform.splunk.com/secret-base": binding.Name, + "platform.splunk.com/secret-version": fmt.Sprintf("v%d", version), + }, + Annotations: map[string]string{ + "platform.splunk.com/source-secret": sourceSecret.Name, + }, + }, + Type: corev1.SecretTypeOpaque, + Data: data, + } + + err := r.client.Create(ctx, versionedSecret) + if err != nil { + if apierrors.IsAlreadyExists(err) { + // Secret already exists, return it + v := int(version) + return &secret.Ref{ + SecretName: versionedName, + Namespace: binding.Namespace, + Keys: binding.Keys, + Ready: true, + Provider: r.detectProvider(sourceSecret), + Version: &v, + SourceSecretName: sourceSecret.Name, + }, nil + } + return nil, fmt.Errorf("failed to create versioned secret: %w", err) + } + + // Clean up old versions + go r.cleanupOldVersions(context.Background(), binding.Name, binding.Namespace, version) + + v := int(version) + return &secret.Ref{ + SecretName: versionedName, + Namespace: binding.Namespace, + Keys: binding.Keys, + Ready: true, + Provider: r.detectProvider(sourceSecret), + Version: &v, + SourceSecretName: sourceSecret.Name, + }, nil +} + +// secretsEqual checks if two secrets have the same data. 
+func (r *Resolver) secretsEqual(a, b *corev1.Secret) bool { + if len(a.Data) != len(b.Data) { + return false + } + + for k, v := range a.Data { + if bv, ok := b.Data[k]; !ok || string(v) != string(bv) { + return false + } + } + + return true +} + +// cleanupOldVersions removes old secret versions beyond retention limit. +func (r *Resolver) cleanupOldVersions(ctx context.Context, baseName, namespace string, currentVersion int) { + // Default: keep last 3 versions + keepVersions := 3 + + secretList := &corev1.SecretList{} + err := r.client.List(ctx, secretList, client.InNamespace(namespace)) + if err != nil { + r.logger.Error(err, "Failed to list secrets for cleanup") + return + } + + // Find all versions + versions := []int{} + prefix := baseName + "-v" + for _, s := range secretList.Items { + if strings.HasPrefix(s.Name, prefix) { + versionStr := strings.TrimPrefix(s.Name, prefix) + if version, err := strconv.Atoi(versionStr); err == nil { + versions = append(versions, version) + } + } + } + + // Sort versions descending + sort.Sort(sort.Reverse(sort.IntSlice(versions))) + + // Delete old versions + for i, version := range versions { + if i >= keepVersions { + secretName := fmt.Sprintf("%s-v%d", baseName, version) + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: namespace, + }, + } + err := r.client.Delete(ctx, secret) + if err != nil && !apierrors.IsNotFound(err) { + r.logger.Error(err, "Failed to delete old secret version", + "name", secretName, + "version", version, + ) + } else { + r.logger.Info("Deleted old secret version", + "name", secretName, + "version", version, + ) + } + } + } +} + +// detectProvider detects which provider created the secret. 
+func (r *Resolver) detectProvider(k8sSecret *corev1.Secret) string { + // Check annotations for ESO + if _, ok := k8sSecret.Annotations["external-secrets.io/created-by"]; ok { + return "external-secrets" + } + + // Check labels + if managedBy, ok := k8sSecret.Labels["app.kubernetes.io/managed-by"]; ok { + if managedBy == "external-secrets" { + return "external-secrets" + } + if managedBy == "platform-sdk" { + return "platform-sdk" + } + } + + return "kubernetes" +} diff --git a/pkg/platform-sdk/services/secret/resolver_test.go b/pkg/platform-sdk/services/secret/resolver_test.go new file mode 100644 index 000000000..13656c9b1 --- /dev/null +++ b/pkg/platform-sdk/services/secret/resolver_test.go @@ -0,0 +1,314 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package secret + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/secret" + "github.com/splunk/splunk-operator/pkg/platform-sdk/services/config" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +// TestResolveGenericSecret tests basic K8s secret resolution +func TestResolveGenericSecret(t *testing.T) { + tests := []struct { + name string + secretExists bool + secretData map[string][]byte + requestedKeys []string + expectedReady bool + expectedError string + expectedKeys []string + }{ + { + name: "secret exists with all keys", + secretExists: true, + secretData: map[string][]byte{ + "username": []byte("admin"), + "password": []byte("changeme"), + }, + requestedKeys: []string{"username", "password"}, + expectedReady: true, + expectedKeys: []string{"username", "password"}, + }, + { + name: "secret missing some keys", + secretExists: true, + secretData: map[string][]byte{ + "username": []byte("admin"), + }, + requestedKeys: []string{"username", "password"}, + expectedReady: false, + expectedError: "missing required keys: [password]", + expectedKeys: []string{"username"}, + }, + { + name: "secret does not exist", + secretExists: false, + requestedKeys: []string{"username", "password"}, + expectedReady: false, + expectedError: "secret postgres-credentials not found", + }, + { + name: "secret exists with extra keys", + secretExists: true, + secretData: map[string][]byte{ + "username": []byte("admin"), + "password": []byte("changeme"), + "host": []byte("localhost"), + }, + requestedKeys: []string{"username", "password"}, + expectedReady: true, + expectedKeys: []string{"username", "password"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + scheme := 
runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + // Create fake client + objs := []runtime.Object{} + if tt.secretExists { + k8sSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "postgres-credentials", + Namespace: "default", + }, + Data: tt.secretData, + } + objs = append(objs, k8sSecret) + } + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithRuntimeObjects(objs...). + Build() + + // Create mock config resolver + mockConfigResolver := &mockConfigResolver{ + secretConfig: &config.ResolvedSecretConfig{ + Provider: "kubernetes", + VersioningEnabled: true, + VersionsToKeep: 3, + }, + } + + // Create resolver + resolver := NewResolver( + fakeClient, + mockConfigResolver, + &api.RuntimeConfig{}, + logr.Discard(), + ) + + // Resolve secret + binding := secret.Binding{ + Name: "postgres-credentials", + Namespace: "default", + Type: secret.SecretTypeGeneric, + Keys: tt.requestedKeys, + } + + ref, err := resolver.Resolve(ctx, binding) + if err != nil { + t.Fatalf("Resolve() error = %v", err) + } + + // Verify results + if ref.Ready != tt.expectedReady { + t.Errorf("Ready = %v, want %v", ref.Ready, tt.expectedReady) + } + + if tt.expectedError != "" && ref.Error != tt.expectedError { + t.Errorf("Error = %v, want %v", ref.Error, tt.expectedError) + } + + if tt.expectedReady { + if len(ref.Keys) != len(tt.expectedKeys) { + t.Errorf("Keys length = %v, want %v", len(ref.Keys), len(tt.expectedKeys)) + } + for _, key := range tt.expectedKeys { + if !ref.HasKey(key) { + t.Errorf("Missing expected key: %s", key) + } + } + } + + if ref.SecretName != "postgres-credentials" { + t.Errorf("SecretName = %v, want postgres-credentials", ref.SecretName) + } + + if ref.Namespace != "default" { + t.Errorf("Namespace = %v, want default", ref.Namespace) + } + }) + } +} + +// TestResolveVersionedSecret tests Splunk secret versioning +func TestResolveVersionedSecret(t *testing.T) { + tests := []struct { + name string + sourceExists bool + sourceData 
map[string][]byte + requestedKeys []string + expectedVersion int + expectedReady bool + expectedError string + }{ + { + name: "create initial version", + sourceExists: true, + sourceData: map[string][]byte{ + "password": []byte("admin123"), + "hec_token": []byte("abc-123"), + "pass4SymmKey": []byte("symm-key"), + }, + requestedKeys: []string{"password", "hec_token"}, + expectedVersion: 1, + expectedReady: true, + }, + { + name: "source secret not found", + sourceExists: false, + requestedKeys: []string{"password"}, + expectedReady: false, + expectedError: "source secret splunk-default-secret not found", + }, + { + name: "source missing required key", + sourceExists: true, + sourceData: map[string][]byte{ + "password": []byte("admin123"), + }, + requestedKeys: []string{"password", "hec_token"}, + expectedReady: false, + expectedError: "source secret missing key: hec_token", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + // Create fake client + objs := []runtime.Object{} + if tt.sourceExists { + sourceSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-default-secret", + Namespace: "default", + }, + Data: tt.sourceData, + } + objs = append(objs, sourceSecret) + } + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithRuntimeObjects(objs...). 
+ Build() + + // Create mock config resolver + mockConfigResolver := &mockConfigResolver{ + secretConfig: &config.ResolvedSecretConfig{ + Provider: "kubernetes", + VersioningEnabled: true, + VersionsToKeep: 3, + }, + } + + // Create resolver + resolver := NewResolver( + fakeClient, + mockConfigResolver, + &api.RuntimeConfig{}, + logr.Discard(), + ) + + // Resolve Splunk secret + binding := secret.Binding{ + Name: "splunk-standalone-secret", + Namespace: "default", + Type: secret.SecretTypeSplunk, + Keys: tt.requestedKeys, + } + + ref, err := resolver.Resolve(ctx, binding) + if err != nil { + t.Fatalf("Resolve() error = %v", err) + } + + // Verify results + if ref.Ready != tt.expectedReady { + t.Errorf("Ready = %v, want %v", ref.Ready, tt.expectedReady) + } + + if tt.expectedError != "" && ref.Error != tt.expectedError { + t.Errorf("Error = %v, want %v", ref.Error, tt.expectedError) + } + + if tt.expectedReady { + if ref.Version == nil || *ref.Version != tt.expectedVersion { + t.Errorf("Version = %v, want %v", ref.Version, tt.expectedVersion) + } + + expectedSecretName := "splunk-standalone-secret-v1" + if ref.SecretName != expectedSecretName { + t.Errorf("SecretName = %v, want %v", ref.SecretName, expectedSecretName) + } + + if ref.SourceSecretName != "splunk-default-secret" { + t.Errorf("SourceSecretName = %v, want splunk-default-secret", ref.SourceSecretName) + } + } + }) + } +} + +// mockConfigResolver implements interfaces.ConfigResolver for testing +type mockConfigResolver struct { + secretConfig *config.ResolvedSecretConfig +} + +func (m *mockConfigResolver) StartWatches(ctx context.Context) error { + return nil +} + +func (m *mockConfigResolver) ResolveConfig(ctx context.Context, key string, namespace string) (interface{}, error) { + return nil, nil +} + +func (m *mockConfigResolver) ResolveSecretConfig(ctx context.Context, namespace string) (*config.ResolvedSecretConfig, error) { + return m.secretConfig, nil +} + +func (m *mockConfigResolver) 
ResolveCertificateConfig(ctx context.Context, namespace string) (*config.ResolvedCertConfig, error) { + return nil, nil +} + +func (m *mockConfigResolver) ResolveObservabilityConfig(ctx context.Context, namespace string) (*config.ResolvedObservabilityConfig, error) { + return nil, nil +} diff --git a/pkg/platform-sdk/services/stubs.go b/pkg/platform-sdk/services/stubs.go new file mode 100644 index 000000000..2cc066200 --- /dev/null +++ b/pkg/platform-sdk/services/stubs.go @@ -0,0 +1,81 @@ +// Copyright (c) 2018-2026 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package services + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/builders" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/certificate" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/secret" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Service stubs (will be implemented in subsequent commits) + +type certificateResolver struct { + client client.Client + configResolver ConfigResolver + config *api.RuntimeConfig + logger logr.Logger +} + +func (r *certificateResolver) Resolve(ctx context.Context, binding certificate.Binding) (*certificate.Ref, error) { + return nil, fmt.Errorf("not yet implemented") +} + +type secretResolver struct { + client client.Client + configResolver ConfigResolver + config *api.RuntimeConfig + logger logr.Logger +} + +func (r *secretResolver) Resolve(ctx context.Context, binding secret.Binding) (*secret.Ref, error) { + return nil, fmt.Errorf("not yet implemented") +} + +type backupService struct { + client client.Client + config *api.RuntimeConfig + logger logr.Logger +} + +// Builder stubs + +type podBuilder struct { + namespace string + ownerName string +} + +func (b *podBuilder) WithContainers(containers []corev1.Container) builders.PodBuilder { return b } +func (b *podBuilder) WithInitContainers(initContainers []corev1.Container) builders.PodBuilder { + return b +} +func (b *podBuilder) WithVolumes(volumes []corev1.Volume) builders.PodBuilder { return b } +func (b *podBuilder) WithServiceAccountName(serviceAccountName string) builders.PodBuilder { return b } +func (b *podBuilder) WithSecurityContext(securityContext *corev1.PodSecurityContext) builders.PodBuilder { + return b +} +func (b *podBuilder) WithAffinity(affinity *corev1.Affinity) builders.PodBuilder { return b } +func (b *podBuilder) WithTolerations(tolerations []corev1.Toleration) 
builders.PodBuilder { return b } +func (b *podBuilder) WithNodeSelector(nodeSelector map[string]string) builders.PodBuilder { return b } +func (b *podBuilder) WithLabels(labels map[string]string) builders.PodBuilder { return b } +func (b *podBuilder) WithAnnotations(annotations map[string]string) builders.PodBuilder { return b } +func (b *podBuilder) Build() (*corev1.PodSpec, error) { return nil, fmt.Errorf("not yet implemented") } diff --git a/pkg/splunk/enterprise/configuration.go b/pkg/splunk/enterprise/configuration.go index c9cc6838b..e4e32cc00 100644 --- a/pkg/splunk/enterprise/configuration.go +++ b/pkg/splunk/enterprise/configuration.go @@ -25,6 +25,7 @@ import ( "strconv" "strings" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" orderedmap "github.com/wk8/go-ordered-map/v2" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -410,6 +411,11 @@ func validateCommonSplunkSpec(ctx context.Context, c splcommon.ControllerClient, return err } + err = validateSplunkGeneralTerms() + if err != nil { + return err + } + // if not provided, set default values for imagePullSecrets err = ValidateImagePullSecrets(ctx, c, cr, spec) if err != nil { @@ -687,8 +693,22 @@ func addProbeConfigMapVolume(configMap *corev1.ConfigMap, statefulSet *appsv1.St }) } +// getSplunkStatefulSetWithSDK returns a Kubernetes StatefulSet object for Splunk instances with Platform SDK integration. +// This function extends getSplunkStatefulSet by integrating the SecretAdapter for secret management. 
func getSplunkStatefulSetWithSDK(ctx context.Context, client splcommon.ControllerClient, sdkRuntime api.Runtime, cr splcommon.MetaObject, spec *enterpriseApi.CommonSplunkSpec, instanceType InstanceType, replicas int32, extraEnv []corev1.EnvVar) (*appsv1.StatefulSet, error) {
	// Use the existing function to create the base statefulset
	// but we'll override the secret resolution logic below.
	// Passing a non-nil sdkRuntime is what switches the shared
	// implementation onto the Platform SDK secret path.
	return getSplunkStatefulSetWithAdapter(ctx, client, sdkRuntime, cr, spec, instanceType, replicas, extraEnv)
}

// getSplunkStatefulSet returns a Kubernetes StatefulSet object for Splunk instances configured for a Splunk Enterprise resource.
func getSplunkStatefulSet(ctx context.Context, client splcommon.ControllerClient, cr splcommon.MetaObject, spec *enterpriseApi.CommonSplunkSpec, instanceType InstanceType, replicas int32, extraEnv []corev1.EnvVar) (*appsv1.StatefulSet, error) {
	// Call the internal implementation without SDK (nil runtime selects the
	// legacy splutil.GetLatestVersionedSecret path).
	return getSplunkStatefulSetWithAdapter(ctx, client, nil, cr, spec, instanceType, replicas, extraEnv)
}

// getSplunkStatefulSetWithAdapter is the internal implementation that handles both SDK and non-SDK modes.
+func getSplunkStatefulSetWithAdapter(ctx context.Context, client splcommon.ControllerClient, sdkRuntime api.Runtime, cr splcommon.MetaObject, spec *enterpriseApi.CommonSplunkSpec, instanceType InstanceType, replicas int32, extraEnv []corev1.EnvVar) (*appsv1.StatefulSet, error) { // prepare misc values ports := splcommon.SortContainerPorts(getSplunkContainerPorts(instanceType)) // note that port order is important for tests @@ -783,9 +803,47 @@ func getSplunkStatefulSet(ctx context.Context, client splcommon.ControllerClient splcommon.AppendParentMeta(statefulSet.Spec.Template.GetObjectMeta(), cr.GetObjectMeta()) // retrieve the secret to upload to the statefulSet pod - statefulSetSecret, err := splutil.GetLatestVersionedSecret(ctx, client, cr, cr.GetNamespace(), statefulSet.GetName()) - if err != nil || statefulSetSecret == nil { - return statefulSet, err + // Always use Platform SDK when runtime is available + var statefulSetSecret *corev1.Secret + + if sdkRuntime != nil { + // Use Platform SDK secret management + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("getSplunkStatefulSetWithAdapter") + scopedLog.Info("Using Platform SDK for secret management", + "cr", cr.GetName(), + "instanceType", instanceType.ToString()) + + // Create SDK reconcile context + rctx := sdkRuntime.NewReconcileContext(ctx, cr.GetNamespace(), cr.GetName()) + + // Create secret adapter + adapter := NewSecretAdapter( + true, // SDK enabled + rctx, + client, + cr.GetNamespace(), + statefulSet.GetName(), + ) + + // Resolve secret via adapter + statefulSetSecret, _, err = adapter.GetSplunkSecret(ctx) + if err != nil || statefulSetSecret == nil { + scopedLog.Error(err, "Failed to get secret via Platform SDK", + "cr", cr.GetName(), + "error", err) + return statefulSet, err + } + + scopedLog.Info("Secret resolved via Platform SDK", + "secretName", statefulSetSecret.GetName(), + "cr", cr.GetName()) + } else { + // Use legacy secret management + statefulSetSecret, err = 
splutil.GetLatestVersionedSecret(ctx, client, cr, cr.GetNamespace(), statefulSet.GetName()) + if err != nil || statefulSetSecret == nil { + return statefulSet, err + } } // update statefulset's pod template with common splunk pod config @@ -937,6 +995,10 @@ func updateSplunkPodTemplateWithConfig(ctx context.Context, client splcommon.Con if instanceType == SplunkStandalone && (len(spec.ClusterMasterRef.Name) > 0 || len(spec.ClusterManagerRef.Name) > 0) { role = SplunkSearchHead.ToRole() } + domainName := os.Getenv("CLUSTER_DOMAIN") + if domainName == "" { + domainName = "cluster.local" + } env := []corev1.EnvVar{ {Name: "SPLUNK_HOME", Value: "/opt/splunk"}, {Name: "SPLUNK_START_ARGS", Value: "--accept-license"}, @@ -1154,6 +1216,43 @@ func getStartupProbe(ctx context.Context, cr splcommon.MetaObject, instanceType return startupProbe } +// getSplunkPodSecurityContext returns the pod security context for Splunk containers. +func getSplunkPodSecurityContext(spec *enterpriseApi.CommonSplunkSpec) *corev1.PodSecurityContext { + runAsUser := int64(41812) + fsGroup := int64(41812) + runAsNonRoot := true + fsGroupChangePolicy := corev1.FSGroupChangeOnRootMismatch + + return &corev1.PodSecurityContext{ + RunAsUser: &runAsUser, + FSGroup: &fsGroup, + RunAsNonRoot: &runAsNonRoot, + FSGroupChangePolicy: &fsGroupChangePolicy, + } +} + +// getSplunkSecurityContext returns the container security context for Splunk containers. 
func getSplunkSecurityContext(spec *enterpriseApi.CommonSplunkSpec) *corev1.SecurityContext {
	// 41812 — presumably the splunk user ID inside the container image;
	// TODO(review) confirm against the docker-splunk image definition.
	runAsUser := int64(41812)
	runAsNonRoot := true
	allowPrivilegeEscalation := false
	privileged := false

	// Hardened defaults: drop all capabilities except NET_BIND_SERVICE
	// (needed to bind ports below 1024), and use the runtime's default
	// seccomp profile. Note: the spec parameter is currently unused.
	return &corev1.SecurityContext{
		RunAsUser:                &runAsUser,
		RunAsNonRoot:             &runAsNonRoot,
		AllowPrivilegeEscalation: &allowPrivilegeEscalation,
		Capabilities: &corev1.Capabilities{
			Drop: []corev1.Capability{"ALL"},
			Add:  []corev1.Capability{"NET_BIND_SERVICE"},
		},
		Privileged: &privileged,
		SeccompProfile: &corev1.SeccompProfile{
			Type: corev1.SeccompProfileTypeRuntimeDefault,
		},
	}
}

 // getProbeWithConfigUpdates Validates probe values and updates them
 func getProbeWithConfigUpdates(defaultProbe *corev1.Probe, configuredProbe *enterpriseApi.Probe, configuredDelay int32) *corev1.Probe {
 	if configuredProbe != nil {
diff --git a/pkg/splunk/enterprise/configuration_test.go b/pkg/splunk/enterprise/configuration_test.go
index 2c92e3ec4..f570bfe90 100644
--- a/pkg/splunk/enterprise/configuration_test.go
+++ b/pkg/splunk/enterprise/configuration_test.go
@@ -286,7 +286,7 @@ func TestSmartstoreApplyStandaloneFailsOnInvalidSmartStoreConfig(t *testing.T) {
 
 	client := spltest.NewMockClient()
 
-	_, err := ApplyStandalone(context.Background(), client, &cr)
+	_, err := ApplyStandalone(context.Background(), client, nil, &cr)
 	if err == nil {
 		t.Errorf("ApplyStandalone should fail on invalid smartstore config")
 	}
diff --git a/pkg/splunk/enterprise/secret_adapter.go b/pkg/splunk/enterprise/secret_adapter.go
new file mode 100644
index 000000000..5e26e4f1f
--- /dev/null
+++ b/pkg/splunk/enterprise/secret_adapter.go
@@ -0,0 +1,223 @@
// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+ +package enterprise + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/secret" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" +) + +// SecretAdapter provides a unified interface for secret management, +// supporting both legacy Kubernetes secrets and Platform SDK secret resolution. +// +// This adapter allows gradual migration from the legacy secret management +// to the Platform SDK without breaking existing deployments. +type SecretAdapter struct { + // sdkEnabled indicates whether to use Platform SDK for secret resolution + sdkEnabled bool + + // rctx is the Platform SDK reconcile context (only set if sdkEnabled=true) + rctx api.ReconcileContext + + // client is the Kubernetes client for direct secret access + client client.Client + + // namespace is the namespace for secret resolution + namespace string + + // crName is the name of the CR (for SDK secret naming) + crName string +} + +// NewSecretAdapter creates a new SecretAdapter. +// +// Parameters: +// - sdkEnabled: whether to use Platform SDK (set to false for legacy mode) +// - rctx: Platform SDK reconcile context (can be nil if sdkEnabled=false) +// - client: Kubernetes client +// - namespace: namespace for secrets +// - crName: name of the CR +func NewSecretAdapter( + sdkEnabled bool, + rctx api.ReconcileContext, + client client.Client, + namespace string, + crName string, +) *SecretAdapter { + return &SecretAdapter{ + sdkEnabled: sdkEnabled, + rctx: rctx, + client: client, + namespace: namespace, + crName: crName, + } +} + +// GetSplunkSecret retrieves the Splunk secret, using either Platform SDK +// or legacy secret resolution based on configuration. 
+// +// The secret contains: +// - password: Splunk admin password +// - hec_token: HTTP Event Collector token +// - pass4SymmKey: cluster security key +// - idxc_secret: indexer cluster secret (optional) +// - shc_secret: search head cluster secret (optional) +func (a *SecretAdapter) GetSplunkSecret(ctx context.Context) (*corev1.Secret, *secret.Ref, error) { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("SecretAdapter.GetSplunkSecret") + + if a.sdkEnabled && a.rctx != nil { + scopedLog.Info("Using Platform SDK for secret resolution", "cr", a.crName) + return a.getSecretViaSDK(ctx) + } + + scopedLog.Info("Using legacy secret resolution", "cr", a.crName) + secret, err := a.getSecretLegacy(ctx) + return secret, nil, err +} + +// getSecretViaSDK uses the Platform SDK to resolve secrets. +// +// This provides: +// - Pluggable providers (K8s native, ESO, Vault, etc.) +// - Secret versioning for rolling updates +// - Automatic secret rotation +// - External secret synchronization detection +func (a *SecretAdapter) getSecretViaSDK(ctx context.Context) (*corev1.Secret, *secret.Ref, error) { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("getSecretViaSDK") + + // Define required secret keys for Splunk + requiredKeys := []string{ + "password", // Admin password + "hec_token", // HTTP Event Collector token + "pass4SymmKey", // Cluster security key + } + + // Resolve secret via Platform SDK + secretRef, err := a.rctx.ResolveSecret(secret.Binding{ + Name: fmt.Sprintf("%s-credentials", a.crName), + Namespace: a.namespace, + Type: secret.SecretTypeSplunk, + Keys: requiredKeys, + }) + if err != nil { + scopedLog.Error(err, "Failed to resolve secret via Platform SDK", + "cr", a.crName, + "namespace", a.namespace) + return nil, nil, fmt.Errorf("SDK secret resolution failed: %w", err) + } + + // Check if secret is ready + if !secretRef.Ready { + scopedLog.Info("Secret not ready yet", + "cr", a.crName, + "secretName", 
secretRef.SecretName, + "provider", secretRef.Provider, + "error", secretRef.Error) + return nil, secretRef, fmt.Errorf("secret not ready: %s", secretRef.Error) + } + + scopedLog.Info("Secret resolved successfully", + "cr", a.crName, + "secretName", secretRef.SecretName, + "provider", secretRef.Provider, + "version", secretRef.Version, + "keys", secretRef.Keys) + + // Emit event for secret resolution (if EventRecorder is available) + if eventRecorder := a.rctx.EventRecorder(); eventRecorder != nil { + eventRecorder.Event( + &corev1.ObjectReference{ + Kind: "Standalone", + Namespace: a.namespace, + Name: a.crName, + }, + corev1.EventTypeNormal, + "SecretResolved", + fmt.Sprintf("Secret %s resolved via %s (version: %v)", secretRef.SecretName, secretRef.Provider, secretRef.Version), + ) + } + + // Get the actual Kubernetes secret + k8sSecret := &corev1.Secret{} + err = a.client.Get(ctx, types.NamespacedName{ + Name: secretRef.SecretName, + Namespace: secretRef.Namespace, + }, k8sSecret) + if err != nil { + scopedLog.Error(err, "Failed to get Kubernetes secret", + "secretName", secretRef.SecretName, + "namespace", secretRef.Namespace) + return nil, secretRef, fmt.Errorf("failed to get K8s secret: %w", err) + } + + return k8sSecret, secretRef, nil +} + +// getSecretLegacy uses the legacy secret resolution logic. +// +// This maintains backwards compatibility with existing deployments +// that don't use the Platform SDK. 
func (a *SecretAdapter) getSecretLegacy(ctx context.Context) (*corev1.Secret, error) {
	reqLogger := log.FromContext(ctx)
	scopedLog := reqLogger.WithName("getSecretLegacy")

	// Use existing naming convention: splunk-{namespace}-secret
	secretName := splcommon.GetNamespaceScopedSecretName(a.namespace)

	scopedLog.V(1).Info("Fetching legacy secret",
		"secretName", secretName,
		"namespace", a.namespace)

	k8sSecret := &corev1.Secret{}
	err := a.client.Get(ctx, types.NamespacedName{
		Name:      secretName,
		Namespace: a.namespace,
	}, k8sSecret)
	if err != nil {
		scopedLog.Error(err, "Failed to get legacy secret",
			"secretName", secretName,
			"namespace", a.namespace)
		return nil, fmt.Errorf("failed to get legacy secret: %w", err)
	}

	scopedLog.V(1).Info("Legacy secret fetched successfully",
		"secretName", secretName,
		"keys", getSecretKeys(k8sSecret))

	return k8sSecret, nil
}

// GetSecretVersion returns the secret version if using SDK, otherwise returns nil.
//
// This is useful for determining whether a rolling restart is needed
// due to secret rotation.
//
// NOTE(review): as implemented it returns nil unconditionally — even in SDK
// mode. Version information is currently only available on the *secret.Ref
// returned by GetSplunkSecret; confirm whether this method should track the
// last resolved version instead.
func (a *SecretAdapter) GetSecretVersion() *int {
	// Version tracking only available via SDK
	// For legacy mode, return nil
	return nil
}

// IsSDKEnabled returns whether Platform SDK is enabled for this adapter.
// (It reports only the flag; a nil rctx still forces the legacy path.)
func (a *SecretAdapter) IsSDKEnabled() bool {
	return a.sdkEnabled
}

// getSecretKeys returns the list of keys in a secret (for logging).
// Map iteration order is random, so the returned order is unstable.
func getSecretKeys(secret *corev1.Secret) []string {
	keys := make([]string, 0, len(secret.Data))
	for k := range secret.Data {
		keys = append(keys, k)
	}
	return keys
}
diff --git a/pkg/splunk/enterprise/secret_adapter_test.go b/pkg/splunk/enterprise/secret_adapter_test.go
new file mode 100644
index 000000000..bf40e1474
--- /dev/null
+++ b/pkg/splunk/enterprise/secret_adapter_test.go
@@ -0,0 +1,415 @@
// Copyright (c) 2018-2026 Splunk Inc. All rights reserved.
+ +package enterprise + +import ( + "context" + "fmt" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/go-logr/logr" + platformv4 "github.com/splunk/splunk-operator/api/platform/v4" + sdk "github.com/splunk/splunk-operator/pkg/platform-sdk" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/config" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" +) + +// TestSecretAdapter_LegacyMode tests secret adapter in legacy mode (SDK disabled). +func TestSecretAdapter_LegacyMode(t *testing.T) { + ctx := context.Background() + namespace := "test-ns" + crName := "test-standalone" + + // Create scheme + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + // Create legacy secret: splunk-{namespace}-secret + legacySecretName := splcommon.GetNamespaceScopedSecretName(namespace) + legacySecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: legacySecretName, + Namespace: namespace, + }, + Data: map[string][]byte{ + "password": []byte("admin123"), + "hec_token": []byte("token123"), + "pass4SymmKey": []byte("key123"), + }, + } + + // Create fake client with legacy secret + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(legacySecret). 
+ Build() + + // Create adapter in legacy mode (sdkEnabled=false) + adapter := NewSecretAdapter( + false, // SDK disabled + nil, // no rctx needed + fakeClient, + namespace, + crName, + ) + + // Test: Get secret in legacy mode + secret, secretRef, err := adapter.GetSplunkSecret(ctx) + if err != nil { + t.Fatalf("GetSplunkSecret() failed: %v", err) + } + + // Verify: Secret should be returned + if secret == nil { + t.Fatal("Expected secret to be returned, got nil") + } + + // Verify: SecretRef should be nil in legacy mode + if secretRef != nil { + t.Error("Expected secretRef to be nil in legacy mode") + } + + // Verify: Secret name matches legacy naming + if secret.Name != legacySecretName { + t.Errorf("Secret name = %v, want %v", secret.Name, legacySecretName) + } + + // Verify: Secret has required keys + requiredKeys := []string{"password", "hec_token", "pass4SymmKey"} + for _, key := range requiredKeys { + if _, ok := secret.Data[key]; !ok { + t.Errorf("Secret missing required key: %s", key) + } + } + + // Verify: SDK not enabled + if adapter.IsSDKEnabled() { + t.Error("IsSDKEnabled() should return false in legacy mode") + } +} + +// TestSecretAdapter_SDKMode tests secret adapter with Platform SDK enabled. +func TestSecretAdapter_SDKMode(t *testing.T) { + ctx := context.Background() + namespace := "test-ns" + crName := "test-standalone" + + // Create scheme + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + _ = config.AddToScheme(scheme) + _ = platformv4.AddToScheme(scheme) + + // Create source secret (what admins create manually or via ESO) + sourceSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-" + namespace + "-secret", + Namespace: namespace, + }, + Data: map[string][]byte{ + "password": []byte("admin456"), + "hec_token": []byte("token456"), + "pass4SymmKey": []byte("key456"), + }, + } + + // Create fake client + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(sourceSecret). 
+ Build() + + // Create SDK runtime + eventRecorder := record.NewFakeRecorder(100) + sdkRuntime, err := sdk.NewRuntime( + fakeClient, + sdk.WithClusterScoped(), + sdk.WithLogger(logr.Discard()), + sdk.WithEventRecorder(eventRecorder), + ) + if err != nil { + t.Fatalf("Failed to create SDK runtime: %v", err) + } + + // Start SDK runtime + if err := sdkRuntime.Start(ctx); err != nil { + t.Fatalf("Failed to start SDK runtime: %v", err) + } + defer sdkRuntime.Stop() + + // Create reconcile context + rctx := sdkRuntime.NewReconcileContext(ctx, namespace, crName) + + // Create adapter in SDK mode + adapter := NewSecretAdapter( + true, // SDK enabled + rctx, + fakeClient, + namespace, + crName, + ) + + // Test: Get secret via SDK + secret, secretRef, err := adapter.GetSplunkSecret(ctx) + if err != nil { + t.Fatalf("GetSplunkSecret() failed: %v", err) + } + + // Verify: Secret should be returned + if secret == nil { + t.Fatal("Expected secret to be returned, got nil") + } + + // Verify: SecretRef should be returned in SDK mode + if secretRef == nil { + t.Fatal("Expected secretRef to be returned in SDK mode, got nil") + } + + // Verify: SecretRef is ready + if !secretRef.Ready { + t.Errorf("SecretRef.Ready = false, want true. 
Error: %s", secretRef.Error) + } + + // Verify: SecretRef has version (for Splunk secrets) + if secretRef.Version == nil { + t.Error("SecretRef.Version should not be nil for Splunk secrets") + } else if *secretRef.Version != 1 { + t.Errorf("SecretRef.Version = %v, want 1 (first version)", *secretRef.Version) + } + + // Verify: Secret has required keys + requiredKeys := []string{"password", "hec_token", "pass4SymmKey"} + for _, key := range requiredKeys { + if _, ok := secret.Data[key]; !ok { + t.Errorf("Secret missing required key: %s", key) + } + } + + // Verify: SDK enabled + if !adapter.IsSDKEnabled() { + t.Error("IsSDKEnabled() should return true in SDK mode") + } + + // Verify: Event was recorded + select { + case event := <-eventRecorder.Events: + if event == "" { + t.Error("Expected event to be recorded, got empty") + } + t.Logf("Event recorded: %s", event) + default: + t.Error("Expected event to be recorded, got none") + } +} + +// TestSecretAdapter_SDKMode_SecretNotReady tests behavior when secret is not yet synced. +func TestSecretAdapter_SDKMode_SecretNotReady(t *testing.T) { + ctx := context.Background() + namespace := "test-ns" + crName := "test-standalone" + + // Create scheme + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + _ = config.AddToScheme(scheme) + _ = platformv4.AddToScheme(scheme) + + // Create fake client WITHOUT source secret (simulating ExternalSecret not yet synced) + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). 
+ Build() + + // Create SDK runtime + sdkRuntime, err := sdk.NewRuntime( + fakeClient, + sdk.WithClusterScoped(), + sdk.WithLogger(logr.Discard()), + ) + if err != nil { + t.Fatalf("Failed to create SDK runtime: %v", err) + } + + if err := sdkRuntime.Start(ctx); err != nil { + t.Fatalf("Failed to start SDK runtime: %v", err) + } + defer sdkRuntime.Stop() + + // Create reconcile context + rctx := sdkRuntime.NewReconcileContext(ctx, namespace, crName) + + // Create adapter in SDK mode + adapter := NewSecretAdapter( + true, + rctx, + fakeClient, + namespace, + crName, + ) + + // Test: Get secret when not ready + secret, secretRef, err := adapter.GetSplunkSecret(ctx) + + // Verify: Error should be returned + if err == nil { + t.Fatal("Expected error when secret not ready, got nil") + } + + // Verify: Secret should be nil + if secret != nil { + t.Error("Expected secret to be nil when not ready") + } + + // Verify: SecretRef should be returned even when not ready + if secretRef == nil { + t.Fatal("Expected secretRef to be returned even when not ready") + } + + // Verify: SecretRef.Ready should be false + if secretRef.Ready { + t.Error("SecretRef.Ready should be false") + } + + // Verify: Error message in SecretRef + if secretRef.Error == "" { + t.Error("SecretRef.Error should contain error message") + } + + t.Logf("Expected error: %v", err) + t.Logf("SecretRef.Error: %s", secretRef.Error) +} + +// TestSecretAdapter_SDKMode_SecretVersioning tests secret versioning. 
+func TestSecretAdapter_SDKMode_SecretVersioning(t *testing.T) { + ctx := context.Background() + namespace := "test-ns" + crName := "test-standalone" + + // Create scheme + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + _ = config.AddToScheme(scheme) + _ = platformv4.AddToScheme(scheme) + + // Create source secret + sourceSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-" + namespace + "-secret", + Namespace: namespace, + }, + Data: map[string][]byte{ + "password": []byte("admin789"), + "hec_token": []byte("token789"), + "pass4SymmKey": []byte("key789"), + }, + } + + // Create fake client + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(sourceSecret). + Build() + + // Create SDK runtime + sdkRuntime, err := sdk.NewRuntime( + fakeClient, + sdk.WithClusterScoped(), + sdk.WithLogger(logr.Discard()), + ) + if err != nil { + t.Fatalf("Failed to create SDK runtime: %v", err) + } + + if err := sdkRuntime.Start(ctx); err != nil { + t.Fatalf("Failed to start SDK runtime: %v", err) + } + defer sdkRuntime.Stop() + + // Create reconcile context + rctx := sdkRuntime.NewReconcileContext(ctx, namespace, crName) + + // Create adapter + adapter := NewSecretAdapter(true, rctx, fakeClient, namespace, crName) + + // First resolution - should create v1 + _, secretRef1, err := adapter.GetSplunkSecret(ctx) + if err != nil { + t.Fatalf("First GetSplunkSecret() failed: %v", err) + } + + if secretRef1.Version == nil || *secretRef1.Version != 1 { + t.Errorf("First version = %v, want 1", secretRef1.Version) + } + + // Second resolution with same content - should return same version + _, secretRef2, err := adapter.GetSplunkSecret(ctx) + if err != nil { + t.Fatalf("Second GetSplunkSecret() failed: %v", err) + } + + if secretRef2.Version == nil || *secretRef2.Version != 1 { + t.Errorf("Second version = %v, want 1 (unchanged)", secretRef2.Version) + } + + // Update source secret + sourceSecret.Data["password"] = 
[]byte("new-password") + if err := fakeClient.Update(ctx, sourceSecret); err != nil { + t.Fatalf("Failed to update source secret: %v", err) + } + + // Third resolution with changed content - should create v2 + _, secretRef3, err := adapter.GetSplunkSecret(ctx) + if err != nil { + t.Fatalf("Third GetSplunkSecret() failed: %v", err) + } + + if secretRef3.Version == nil || *secretRef3.Version != 2 { + t.Errorf("Third version = %v, want 2 (changed)", secretRef3.Version) + } + + // Verify versioned secret was created + versionedSecretName := fmt.Sprintf("%s-credentials-v2", crName) + versionedSecret := &corev1.Secret{} + err = fakeClient.Get(ctx, types.NamespacedName{ + Name: versionedSecretName, + Namespace: namespace, + }, versionedSecret) + if err != nil { + t.Errorf("Versioned secret not created: %v", err) + } +} + +// TestGetSecretKeys tests the getSecretKeys helper function. +func TestGetSecretKeys(t *testing.T) { + secret := &corev1.Secret{ + Data: map[string][]byte{ + "key1": []byte("value1"), + "key2": []byte("value2"), + "key3": []byte("value3"), + }, + } + + keys := getSecretKeys(secret) + + if len(keys) != 3 { + t.Errorf("Expected 3 keys, got %d", len(keys)) + } + + // Verify all keys are present (order doesn't matter) + keyMap := make(map[string]bool) + for _, k := range keys { + keyMap[k] = true + } + + for _, expectedKey := range []string{"key1", "key2", "key3"} { + if !keyMap[expectedKey] { + t.Errorf("Expected key %s not found in keys list", expectedKey) + } + } +} diff --git a/pkg/splunk/enterprise/standalone.go b/pkg/splunk/enterprise/standalone.go index dd498ce33..781dd6761 100644 --- a/pkg/splunk/enterprise/standalone.go +++ b/pkg/splunk/enterprise/standalone.go @@ -22,6 +22,9 @@ import ( "time" enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/certificate" + "github.com/splunk/splunk-operator/pkg/platform-sdk/api/secret" 
splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller" @@ -35,7 +38,7 @@ import ( ) // ApplyStandalone reconciles the StatefulSet for N standalone instances of Splunk Enterprise. -func ApplyStandalone(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.Standalone) (reconcile.Result, error) { +func ApplyStandalone(ctx context.Context, client splcommon.ControllerClient, sdkRuntime api.Runtime, cr *enterpriseApi.Standalone) (reconcile.Result, error) { // unless modified, reconcile for this object will be requeued after 5 seconds result := reconcile.Result{ @@ -202,7 +205,7 @@ func ApplyStandalone(ctx context.Context, client splcommon.ControllerClient, cr } // create or update statefulset - statefulSet, err := getStandaloneStatefulSet(ctx, client, cr) + statefulSet, err := getStandaloneStatefulSet(ctx, client, sdkRuntime, cr) if err != nil { eventPublisher.Warning(ctx, "getStandaloneStatefulSet", fmt.Sprintf("get standalone status set failed %s", err.Error())) return result, err @@ -287,7 +290,188 @@ func ApplyStandalone(ctx context.Context, client splcommon.ControllerClient, cr } // getStandaloneStatefulSet returns a Kubernetes StatefulSet object for Splunk Enterprise standalone instances. 
-func getStandaloneStatefulSet(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.Standalone) (*appsv1.StatefulSet, error) { +func getStandaloneStatefulSet(ctx context.Context, client splcommon.ControllerClient, sdkRuntime api.Runtime, cr *enterpriseApi.Standalone) (*appsv1.StatefulSet, error) { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("getStandaloneStatefulSet").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + + // Legacy mode: fall back to old implementation if SDK runtime is nil + if sdkRuntime == nil { + scopedLog.Info("Using legacy StatefulSet creation (SDK runtime not available)") + return getStandaloneStatefulSetLegacy(ctx, client, cr) + } + + // Create SDK reconcile context + rctx := sdkRuntime.NewReconcileContext(ctx, cr.Namespace, cr.Name) + + // 1. Ensure legacy namespace-scoped secret exists (SDK will use it as source) + // The SDK expects the legacy secret splunk-{namespace}-secret to exist before resolution + scopedLog.Info("Ensuring legacy namespace-scoped secret exists", "namespace", cr.Namespace) + _, err := splutil.ApplyNamespaceScopedSecretObject(ctx, client, cr.GetNamespace()) + if err != nil { + scopedLog.Error(err, "Failed to create/update namespace-scoped secret") + return nil, fmt.Errorf("failed to ensure namespace-scoped secret: %w", err) + } + + // 2. 
Resolve secrets using SDK + // Use a simple binding name like "{crName}-credentials" which will create versioned secrets + // The SDK will automatically look for the legacy source secret: splunk-{namespace}-secret + bindingName := fmt.Sprintf("%s-credentials", cr.Name) + scopedLog.Info("Resolving secret via SDK", "bindingName", bindingName, "namespace", cr.Namespace) + + secretRef, err := rctx.ResolveSecret(secret.Binding{ + Name: bindingName, + Namespace: cr.Namespace, + Type: secret.SecretTypeSplunk, + Keys: []string{"password", "hec_token", "pass4SymmKey", "idxc_secret", "shc_secret"}, + }) + if err != nil { + scopedLog.Error(err, "Failed to resolve secret") + return nil, fmt.Errorf("failed to resolve secret: %w", err) + } + if !secretRef.Ready { + scopedLog.Info("Secret not ready", "error", secretRef.Error) + return nil, fmt.Errorf("secret not ready: %s", secretRef.Error) + } + scopedLog.Info("Secret resolved successfully", "secretName", secretRef.SecretName, "ready", secretRef.Ready) + + // 3. TODO: Resolve certificates (optional - only if TLS enabled) + // This will be implemented in a future update + var tlsCert *certificate.Ref + _ = tlsCert // Avoid unused variable error + + // Get common configurations + ports := splcommon.SortContainerPorts(getSplunkContainerPorts(SplunkStandalone)) + annotations := splcommon.GetIstioAnnotations(ports) + labels := getSplunkLabels(cr.GetName(), SplunkStandalone, cr.Spec.ClusterManagerRef.Name) + affinity := splcommon.AppendPodAntiAffinity(&cr.Spec.CommonSplunkSpec.Affinity, cr.GetName(), SplunkStandalone.ToString()) + + // Get probes + livenessProbe := getLivenessProbe(ctx, cr, SplunkStandalone, &cr.Spec.CommonSplunkSpec) + readinessProbe := getReadinessProbe(ctx, cr, SplunkStandalone, &cr.Spec.CommonSplunkSpec) + startupProbe := getStartupProbe(ctx, cr, SplunkStandalone, &cr.Spec.CommonSplunkSpec) + + // 4. Build StatefulSet using SDK + builder := rctx.BuildStatefulSet(). 
+ // Basic configuration + WithName(GetSplunkStatefulsetName(SplunkStandalone, cr.GetName())). + WithNamespace(cr.GetNamespace()). + WithReplicas(cr.Spec.Replicas). + WithImage(cr.Spec.CommonSplunkSpec.Image). + WithImagePullPolicy(corev1.PullPolicy(cr.Spec.ImagePullPolicy)). + WithLabels(labels). + WithAnnotations(annotations). + // Ports + WithPorts(ports). + // Environment variables + WithEnv(corev1.EnvVar{Name: "SPLUNK_HOME", Value: "/opt/splunk"}). + WithEnv(corev1.EnvVar{Name: "SPLUNK_ROLE", Value: "splunk_standalone"}). + WithEnv(corev1.EnvVar{Name: "SPLUNK_START_ARGS", Value: "--accept-license"}). + WithEnv(corev1.EnvVar{Name: "SPLUNK_DECLARATIVE_ADMIN_PASSWORD", Value: "true"}). + WithEnv(corev1.EnvVar{Name: "SPLUNK_DEFAULTS_URL", Value: "/mnt/splunk-secrets/default.yml"}). + WithEnv(corev1.EnvVar{Name: "SPLUNK_HOME_OWNERSHIP_ENFORCEMENT", Value: "false"}). + WithEnv(corev1.EnvVar{Name: "SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH", Value: GetLivenessDriverFilePath()}). + WithEnv(corev1.EnvVar{Name: "SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH", Value: "true"}). + WithEnv(corev1.EnvVar{Name: "SPLUNK_GENERAL_TERMS", Value: "--accept-sgt-current-at-splunk-com"}). + // Resources + WithResources(cr.Spec.CommonSplunkSpec.Resources). + // Security contexts + WithPodSecurityContext(getSplunkPodSecurityContext(&cr.Spec.CommonSplunkSpec)). + WithSecurityContext(getSplunkSecurityContext(&cr.Spec.CommonSplunkSpec)). + // Probes + WithLivenessProbe(livenessProbe). + WithReadinessProbe(readinessProbe). + WithStartupProbe(startupProbe). + // Affinity + WithAffinity(affinity). + // StatefulSet policies + WithServiceName(GetSplunkServiceName(SplunkStandalone, cr.GetName(), true)). + WithUpdateStrategy(appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.OnDeleteStatefulSetStrategyType, + }). 
+ WithPVCRetentionPolicy(&appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenDeleted: appsv1.RetainPersistentVolumeClaimRetentionPolicyType, + WhenScaled: appsv1.RetainPersistentVolumeClaimRetentionPolicyType, + }). + WithTerminationGracePeriodSeconds(30). + // SDK-managed resources + WithSecret(secretRef). + WithConfigMap(GetProbeConfigMapName(cr.Namespace)) + + // IMPORTANT: SDK doesn't auto-mount K8s secrets (only uses envFrom) + // But Splunk requires secret mounted as file at /mnt/splunk-secrets/default.yml + // So we manually add the volume and mount + secretVolDefaultMode := int32(corev1.SecretVolumeSourceDefaultMode) + builder = builder. + WithVolume(corev1.Volume{ + Name: "mnt-splunk-secrets", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretRef.SecretName, + DefaultMode: &secretVolDefaultMode, + }, + }, + }). + WithVolumeMount(corev1.VolumeMount{ + Name: "mnt-splunk-secrets", + MountPath: "/mnt/splunk-secrets", + ReadOnly: true, + }) + + // Add certificates if enabled + if tlsCert != nil { + builder = builder.WithCertificate(tlsCert) + } + + // Add storage volumes - check if ephemeral or PVC + if !cr.Spec.CommonSplunkSpec.EtcVolumeStorageConfig.EphemeralStorage { + // Add PVC for /opt/splunk/etc + etcPVC, err := getSplunkVolumeClaims(cr, &cr.Spec.CommonSplunkSpec, labels, splcommon.EtcVolumeStorage, false) + if err != nil { + return nil, fmt.Errorf("failed to get etc PVC: %w", err) + } + builder = builder.WithVolumeClaimTemplate(etcPVC) + } + + if !cr.Spec.CommonSplunkSpec.VarVolumeStorageConfig.EphemeralStorage { + // Add PVC for /opt/splunk/var + varPVC, err := getSplunkVolumeClaims(cr, &cr.Spec.CommonSplunkSpec, labels, splcommon.VarVolumeStorage, false) + if err != nil { + return nil, fmt.Errorf("failed to get var PVC: %w", err) + } + builder = builder.WithVolumeClaimTemplate(varPVC) + } + + // Build the StatefulSet + ss, err := builder.Build() + if err != nil { + scopedLog.Error(err, "Failed to 
build StatefulSet") + return nil, fmt.Errorf("failed to build StatefulSet: %w", err) + } + + // Post-build configuration: Add ephemeral volumes if needed + if cr.Spec.CommonSplunkSpec.EtcVolumeStorageConfig.EphemeralStorage { + addEphemeralVolumes(ss, splcommon.EtcVolumeStorage) + } + if cr.Spec.CommonSplunkSpec.VarVolumeStorageConfig.EphemeralStorage { + addEphemeralVolumes(ss, splcommon.VarVolumeStorage) + } + + // 5. Add SmartStore init container if needed (preserve existing logic) + smartStoreConfigMap := getSmartstoreConfigMap(ctx, client, cr, SplunkStandalone) + if smartStoreConfigMap != nil { + setupInitContainer(&ss.Spec.Template, cr.Spec.Image, cr.Spec.ImagePullPolicy, commandForStandaloneSmartstore, cr.Spec.CommonSplunkSpec.EtcVolumeStorageConfig.EphemeralStorage) + } + + // 6. Setup App framework staging volume (preserve existing logic) + setupAppsStagingVolume(ctx, client, cr, &ss.Spec.Template, &cr.Spec.AppFrameworkConfig) + + scopedLog.Info("StatefulSet built successfully using Platform SDK") + return ss, nil +} + +// getStandaloneStatefulSetLegacy returns a StatefulSet using the legacy (pre-SDK) implementation. +// This is used for backward compatibility when SDK runtime is not available (e.g., in old tests). 
+func getStandaloneStatefulSetLegacy(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.Standalone) (*appsv1.StatefulSet, error) { // get generic statefulset for Splunk Enterprise objects ss, err := getSplunkStatefulSet(ctx, client, cr, &cr.Spec.CommonSplunkSpec, SplunkStandalone, cr.Spec.Replicas, []corev1.EnvVar{}) if err != nil { diff --git a/pkg/splunk/enterprise/standalone_test.go b/pkg/splunk/enterprise/standalone_test.go index f933ca08d..8e79dc588 100644 --- a/pkg/splunk/enterprise/standalone_test.go +++ b/pkg/splunk/enterprise/standalone_test.go @@ -129,7 +129,7 @@ func TestApplyStandalone(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - _, err := ApplyStandalone(context.Background(), c, cr.(*enterpriseApi.Standalone)) + _, err := ApplyStandalone(context.Background(), c, nil, cr.(*enterpriseApi.Standalone)) return err } spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyStandalone", ¤t, revised, createCalls, updateCalls, reconcile, true) @@ -139,7 +139,7 @@ func TestApplyStandalone(t *testing.T) { revised.ObjectMeta.DeletionTimestamp = ¤tTime revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) { - _, err := ApplyStandalone(context.Background(), c, cr.(*enterpriseApi.Standalone)) + _, err := ApplyStandalone(context.Background(), c, nil, cr.(*enterpriseApi.Standalone)) return true, err } splunkDeletionTester(t, revised, deleteFunc) @@ -149,7 +149,7 @@ func TestApplyStandalone(t *testing.T) { c := spltest.NewMockClient() ctx := context.TODO() _ = errors.New(splcommon.Rerr) - _, err := ApplyStandalone(ctx, c, ¤t) + _, err := ApplyStandalone(ctx, c, nil, ¤t) if err == nil { t.Errorf("Expected error") } @@ -197,7 +197,7 @@ func TestApplyStandalone(t *testing.T) { }, }, } - ApplyStandalone(ctx, c, ¤t) + ApplyStandalone(ctx, c, 
nil, ¤t) } func TestApplyStandaloneWithSmartstore(t *testing.T) { @@ -300,7 +300,7 @@ func TestApplyStandaloneWithSmartstore(t *testing.T) { client := spltest.NewMockClient() // Without S3 keys, ApplyStandalone should fail - _, err := ApplyStandalone(context.Background(), client, ¤t) + _, err := ApplyStandalone(context.Background(), client, nil, ¤t) if err == nil { t.Errorf("ApplyStandalone should fail without S3 secrets configured") } @@ -321,7 +321,7 @@ func TestApplyStandaloneWithSmartstore(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - _, err := ApplyStandalone(context.Background(), c, cr.(*enterpriseApi.Standalone)) + _, err := ApplyStandalone(context.Background(), c, nil, cr.(*enterpriseApi.Standalone)) return err } spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyStandaloneWithSmartstore", ¤t, revised, createCalls, updateCalls, reconcile, true, secret) @@ -348,15 +348,15 @@ func TestGetStandaloneStatefulSet(t *testing.T) { if err := validateStandaloneSpec(ctx, c, &cr); err != nil { t.Errorf("validateStandaloneSpec() returned error: %v", err) } - return getStandaloneStatefulSet(ctx, c, &cr) + return getStandaloneStatefulSet(ctx, c, nil, &cr) } configTester(t, "getStandaloneStatefulSet()", f, want) } - test(loadFixture(t, "statefulset_stack1_standalone_base.json")) + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-standalone","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-standalone-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"fals
e"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivilegeEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-standalone"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"st
andalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-standalone-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) cr.Spec.EtcVolumeStorageConfig.EphemeralStorage = true cr.Spec.VarVolumeStorageConfig.EphemeralStorage = true - test(loadFixture(t, "statefulset_stack1_standalone_base_1.json")) + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-standalone","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"mnt-splunk-etc","emptyDir":{}},{"name":"mnt-splunk-var","emptyDir":{}},{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-standalone-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk
-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"mnt-splunk-etc","mountPath":"/opt/splunk/etc"},{"name":"mnt-splunk-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivilegeEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-standalone"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"serviceName":"splunk-stack1-standalone-headles
s","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) cr.Spec.EtcVolumeStorageConfig.EphemeralStorage = false cr.Spec.VarVolumeStorageConfig.EphemeralStorage = false @@ -370,10 +370,10 @@ func TestGetStandaloneStatefulSet(t *testing.T) { cr.Spec.Volumes = []corev1.Volume{ {Name: "defaults"}, } - test(loadFixture(t, "statefulset_stack1_standalone_with_defaults.json")) + test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-standalone","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"defaults"},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-standalone-secret-v1","defaultMode":420}},{"name":"mnt-splunk-defaults","configMap":{"name":"splunk-stack1-standalone-defaults"
,"defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"}],"env":[{"name":"SPLUNK_CLUSTER_MASTER_URL","value":"splunk-stack2-cluster-manager-service"},{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-defaults/default.yml,/mnt/defaults/defaults.yml,/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_search_head"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"defaults","mountPath":"/mnt/defaults"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"},{"name":"mnt-splunk-defaults","mountPath":"/mnt/splunk-defaults"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"
imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivilegeEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-standalone"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"custom-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}},"storageClassName":"gp2"},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}},"storageClassName":"gp2"},"status":{}}],"serviceName":"splunk-stack1-standalone-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) cr.Spec.DefaultsURLApps = "/mnt/apps/apps.yml" - test(loadFixture(t, "statefulset_stack1_standalone_with_apps.json")) + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-standalone","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"defaults"},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-standalone-secret-v1","defaultMode":420}},{"name":"mnt-splunk-defaults","configMap":{"name":"splunk-stack1-standalone-defaults","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"}],"env":[{"name":"SPLUNK_CLUSTER_MASTER_URL","value":"splunk-stack2-cluster-manager-service"},{"name":"SPLUNK_HOME
","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-defaults/default.yml,/mnt/defaults/defaults.yml,/mnt/apps/apps.yml,/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_search_head"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"defaults","mountPath":"/mnt/defaults"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"},{"name":"mnt-splunk-defaults","mountPath":"/mnt/splunk-defaults"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivilegeEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnore
dDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-standalone"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"custom-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}},"storageClassName":"gp2"},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}},"storageClassName":"gp2"},"status":{}}],"serviceName":"splunk-stack1-standalone-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) // Create a serviceaccount current := corev1.ServiceAccount{ @@ -384,7 +384,7 @@ func TestGetStandaloneStatefulSet(t *testing.T) { } _ = splutil.CreateResource(ctx, c, ¤t) cr.Spec.ServiceAccount = "defaults" - test(loadFixture(t, "statefulset_stack1_standalone_with_service_account.json")) + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-standalone","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"defaults"},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-standalone-secret-v1","defaultMode":420}},{"name":"mnt-splunk-defaults","configMap":{"name":"splunk-stack1-standalone-defaults","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"}],"env":[{"name":"SPLUNK_CLUSTER_MASTER_URL","value":"splunk-stack2-cluster-manager-service"},{"name":"SPLUNK_HOME
","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-defaults/default.yml,/mnt/defaults/defaults.yml,/mnt/apps/apps.yml,/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_search_head"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"defaults","mountPath":"/mnt/defaults"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"},{"name":"mnt-splunk-defaults","mountPath":"/mnt/splunk-defaults"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivilegeEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"serviceAccountName":"defaults","securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{
"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-standalone"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"custom-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}},"storageClassName":"gp2"},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}},"storageClassName":"gp2"},"status":{}}],"serviceName":"splunk-stack1-standalone-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) // Add extraEnv cr.Spec.CommonSplunkSpec.ExtraEnv = []corev1.EnvVar{ @@ -393,12 +393,12 @@ func TestGetStandaloneStatefulSet(t *testing.T) { Value: "test_value", }, } - test(loadFixture(t, "statefulset_stack1_standalone_with_service_account_1.json")) + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-standalone","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"defaults"},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-standalone-secret-v1","defaultMode":420}},{"name":"mnt-splunk-defaults","configMap":{"name":"splunk-stack1-standalone-defaults","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"}],"env":[{"name":"TEST_ENV_VAR","value":"test_value"},{"name":"SPLUNK_CLUSTER_MASTER_URL","value":"splunk-stack2-c
luster-manager-service"},{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-defaults/default.yml,/mnt/defaults/defaults.yml,/mnt/apps/apps.yml,/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_search_head"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"defaults","mountPath":"/mnt/defaults"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"},{"name":"mnt-splunk-defaults","mountPath":"/mnt/splunk-defaults"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivilegeEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"serviceAccountName":"defaults","securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnR
ootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-standalone"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"custom-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}},"storageClassName":"gp2"},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}},"storageClassName":"gp2"},"status":{}}],"serviceName":"splunk-stack1-standalone-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) // Add additional label to cr metadata to transfer to the statefulset cr.ObjectMeta.Labels = make(map[string]string) cr.ObjectMeta.Labels["app.kubernetes.io/test-extra-label"] = "test-extra-label-value" - test(loadFixture(t, "statefulset_stack1_standalone_with_service_account_2.json")) + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-standalone","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"defaults"},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-standalone-secret-v1","defaultMode":420}},{"name":"mnt-splunk-defaults","configMap":{"name":"splunk-stack1-standalone-defaults","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protoc
ol":"TCP"}],"env":[{"name":"TEST_ENV_VAR","value":"test_value"},{"name":"SPLUNK_CLUSTER_MASTER_URL","value":"splunk-stack2-cluster-manager-service"},{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-defaults/default.yml,/mnt/defaults/defaults.yml,/mnt/apps/apps.yml,/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_search_head"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"defaults","mountPath":"/mnt/defaults"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"},{"name":"mnt-splunk-defaults","mountPath":"/mnt/splunk-defaults"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivilegeEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"servic
eAccountName":"defaults","securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-standalone"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"custom-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}},"storageClassName":"gp2"},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}},"storageClassName":"gp2"},"status":{}}],"serviceName":"splunk-stack1-standalone-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) } func TestStandaloneSpecNotCreatedWithoutGeneralTerms(t *testing.T) { @@ -421,7 +421,7 @@ func TestStandaloneSpecNotCreatedWithoutGeneralTerms(t *testing.T) { c := spltest.NewMockClient() // Attempt to apply the standalone spec - _, err := ApplyStandalone(ctx, c, &standalone) + _, err := ApplyStandalone(ctx, c, 
nil, &standalone) // Assert that an error is returned if err == nil { @@ -472,7 +472,7 @@ func TestApplyStandaloneSmartstoreKeyChangeDetection(t *testing.T) { t.Error(err.Error()) } - _, err = ApplyStandalone(context.Background(), client, ¤t) + _, err = ApplyStandalone(context.Background(), client, nil, ¤t) if err != nil { t.Errorf("ApplyStandalone should not fail with full configuration") } @@ -559,7 +559,7 @@ func TestAppFrameworkApplyStandaloneShouldNotFail(t *testing.T) { t.Errorf("Unable to create download directory for apps :%s", splcommon.AppDownloadVolume) } - _, err = ApplyStandalone(ctx, client, &cr) + _, err = ApplyStandalone(ctx, client, nil, &cr) if err != nil { t.Errorf("ApplyStandalone should be successful") @@ -630,7 +630,7 @@ func TestAppFrameworkApplyStandaloneScalingUpShouldNotFail(t *testing.T) { if err != nil { t.Errorf("Unable to create download directory for apps :%s", splcommon.AppDownloadVolume) } - _, err = ApplyStandalone(ctx, client, &cr) + _, err = ApplyStandalone(ctx, client, nil, &cr) if err != nil { t.Errorf("ApplyStandalone should be successful") @@ -638,7 +638,7 @@ func TestAppFrameworkApplyStandaloneScalingUpShouldNotFail(t *testing.T) { // now scale up cr.Spec.Replicas = 2 - _, err = ApplyStandalone(ctx, client, &cr) + _, err = ApplyStandalone(ctx, client, nil, &cr) if err != nil { t.Errorf("ApplyStandalone should be successful") } @@ -1066,7 +1066,7 @@ func TestApplyStandaloneDeletion(t *testing.T) { t.Errorf("Unable to create download directory for apps :%s", splcommon.AppDownloadVolume) } - _, err = ApplyStandalone(ctx, c, &stand1) + _, err = ApplyStandalone(ctx, c, nil, &stand1) if err != nil { t.Errorf("ApplyStandalone should not have returned error here.") } @@ -1227,7 +1227,7 @@ func TestStandaloneWitAppFramework(t *testing.T) { c.Create(ctx, standalone) // call reconciliation - _, err := ApplyStandalone(ctx, c, standalone) + _, err := ApplyStandalone(ctx, c, nil, standalone) if err != nil { t.Errorf("Unexpected error 
while running reconciliation for standalone with app framework %v", err) debug.PrintStack() @@ -1236,49 +1236,15 @@ func TestStandaloneWitAppFramework(t *testing.T) { func TestStandaloneWithReadyState(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - - // Initialize the global resource tracker to allow app framework to run - initGlobalResourceTracker() - - // Create temporary directory for app framework operations + // create directory for app framework newpath := filepath.Join("/tmp", "appframework") _ = os.MkdirAll(newpath, os.ModePerm) - defer os.RemoveAll(newpath) - // Create app download directory required by app framework - err := os.MkdirAll(splcommon.AppDownloadVolume, 0755) - if err != nil { - t.Fatalf("Unable to create download directory for apps: %s", splcommon.AppDownloadVolume) - } - defer os.RemoveAll(splcommon.AppDownloadVolume) - - // Mock GetAppsList to return empty list (no apps to download) - savedGetAppsList := GetAppsList + // adding getapplist to fix test case GetAppsList = func(ctx context.Context, remoteDataClientMgr RemoteDataClientManager) (splclient.RemoteDataListResponse, error) { RemoteDataListResponse := splclient.RemoteDataListResponse{} return RemoteDataListResponse, nil } - defer func() { GetAppsList = savedGetAppsList }() - - // Mock GetPodExecClient to return a mock client that simulates pod operations locally - savedGetPodExecClient := splutil.GetPodExecClient - splutil.GetPodExecClient = func(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) splutil.PodExecClientImpl { - mockClient := &spltest.MockPodExecClient{ - Client: client, - Cr: cr, - TargetPodName: targetPodName, - } - // Add mock responses for common commands - ctx := context.TODO() - // Mock mkdir command (used by createDirOnSplunkPods) - mockClient.AddMockPodExecReturnContext(ctx, "mkdir -p", &spltest.MockPodExecReturnContext{ - StdOut: "", - StdErr: "", - Err: nil, - }) - return mockClient - } - 
defer func() { splutil.GetPodExecClient = savedGetPodExecClient }() sch := pkgruntime.NewScheme() utilruntime.Must(clientgoscheme.AddToScheme(sch)) @@ -1386,7 +1352,7 @@ func TestStandaloneWithReadyState(t *testing.T) { // simulate create standalone instance before reconcilation c.Create(ctx, &standalone) - _, err = ApplyStandalone(ctx, c, &standalone) + _, err := ApplyStandalone(ctx, c, nil, &standalone) if err != nil { t.Errorf("Unexpected error while running reconciliation for standalone with app framework %v", err) debug.PrintStack() @@ -1426,7 +1392,7 @@ func TestStandaloneWithReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyStandalone(ctx, c, &standalone) + _, err = ApplyStandalone(ctx, c, nil, &standalone) if err != nil { t.Errorf("Unexpected error while running reconciliation for standalone with app framework %v", err) debug.PrintStack() @@ -1544,7 +1510,7 @@ func TestStandaloneWithReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyStandalone(ctx, c, &standalone) + _, err = ApplyStandalone(ctx, c, nil, &standalone) if err != nil { t.Errorf("Unexpected error while running reconciliation for standalone with app framework %v", err) debug.PrintStack() diff --git a/pkg/splunk/enterprise/upgrade_test.go b/pkg/splunk/enterprise/upgrade_test.go index fc960e61e..88db0ba06 100644 --- a/pkg/splunk/enterprise/upgrade_test.go +++ b/pkg/splunk/enterprise/upgrade_test.go @@ -61,7 +61,7 @@ func TestUpgradePathValidation(t *testing.T) { if err != nil { t.Errorf("create should not have returned error; err=%v", err) } - _, err = ApplyStandalone(ctx, client, &stdln) + _, err = ApplyStandalone(ctx, client, nil, &stdln) if err != nil { t.Errorf("ApplyStandalone should not have returned error; err=%v", err) } @@ -458,7 +458,7 @@ func TestUpgradePathValidation(t *testing.T) { if err != nil { t.Errorf("update should not have returned error; err=%v", err) } - _, err = ApplyStandalone(ctx, client, &stdln) + _, err = ApplyStandalone(ctx, client, 
nil, &stdln) if err != nil { t.Errorf("ApplyStandalone should not have returned error; err=%v", err) } diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go index 7168e366a..8d65e617f 100644 --- a/pkg/splunk/enterprise/util_test.go +++ b/pkg/splunk/enterprise/util_test.go @@ -2668,7 +2668,7 @@ func TestUpdateCRStatus(t *testing.T) { } // call reconciliation - _, err = ApplyStandalone(ctx, c, standalone) + _, err = ApplyStandalone(ctx, c, nil, standalone) if err != nil { t.Errorf("Apply standalone failed.") } diff --git a/pkg/splunk/splkcontroller/controller.go b/pkg/splunk/splkcontroller/controller.go index 34389d7ab..efbd33608 100644 --- a/pkg/splunk/splkcontroller/controller.go +++ b/pkg/splunk/splkcontroller/controller.go @@ -17,7 +17,6 @@ package splkcontroller import ( "context" - splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/pkg/splunk/splkcontroller/controller_test.go b/pkg/splunk/splkcontroller/controller_test.go index 431473d49..d0f6acfaf 100644 --- a/pkg/splunk/splkcontroller/controller_test.go +++ b/pkg/splunk/splkcontroller/controller_test.go @@ -18,12 +18,11 @@ package splkcontroller import ( "context" "errors" - "net/http" - "testing" - "k8s.io/client-go/kubernetes/scheme" + "net/http" ctrl2 "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/config" + "testing" "github.com/go-logr/logr" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"