From 0360b3f65ebce340b21f71f0e63c8ab05f116460 Mon Sep 17 00:00:00 2001 From: Anatolii Bazko Date: Mon, 11 May 2026 14:18:07 +0200 Subject: [PATCH 1/9] feat: Manage prometheus resources Signed-off-by: Anatolii Bazko --- api/v1/zz_generated.deepcopy.go | 2 +- api/v2/checluster_types.go | 2 +- api/v2/zz_generated.deepcopy.go | 2 +- build/scripts/clear-defined-test.sh | 18 +- build/scripts/docker-run.sh | 4 +- bundle/next/eclipse-che/bundle.Dockerfile | 2 +- .../che-operator-service_v1_service.yaml | 2 +- ...c.authorization.k8s.io_v1_clusterrole.yaml | 2 +- ...c.authorization.k8s.io_v1_clusterrole.yaml | 2 +- .../org.eclipse.che_checlusters.yaml | 2 +- .../eclipse-che/metadata/annotations.yaml | 2 +- .../eclipse-che/tests/scorecard/config.yaml | 2 +- cmd/main.go | 11 +- .../bases/org.eclipse.che_checlusters.yaml | 2 +- .../che-operator.clusterserviceversion.yaml | 2 +- config/rbac/cluster_role.yaml | 15 +- controllers/che/checluster_controller.go | 12 +- deploy/deployment/kubernetes/combined.yaml | 17 +- ...che-operator-selfsigned-issuer.Issuer.yaml | 2 +- .../objects/che-operator-service.Service.yaml | 2 +- ...che-operator-serving-cert.Certificate.yaml | 2 +- .../objects/che-operator.ClusterRole.yaml | 17 +- .../che-operator.ClusterRoleBinding.yaml | 2 +- .../objects/che-operator.Deployment.yaml | 2 +- .../kubernetes/objects/che-operator.Role.yaml | 2 +- .../objects/che-operator.RoleBinding.yaml | 2 +- .../objects/che-operator.ServiceAccount.yaml | 2 +- ....eclipse.che.CustomResourceDefinition.yaml | 2 +- .../objects/eclipse-che-edit.ClusterRole.yaml | 2 +- .../objects/eclipse-che-view.ClusterRole.yaml | 2 +- .../objects/eclipse-che.Namespace.yaml | 2 +- ...ipse.che.MutatingWebhookConfiguration.yaml | 2 +- ...se.che.ValidatingWebhookConfiguration.yaml | 2 +- deploy/deployment/openshift/combined.yaml | 17 +- .../objects/che-operator-service.Service.yaml | 2 +- .../objects/che-operator.ClusterRole.yaml | 17 +- .../che-operator.ClusterRoleBinding.yaml | 2 +- 
.../objects/che-operator.Deployment.yaml | 2 +- .../openshift/objects/che-operator.Role.yaml | 2 +- .../objects/che-operator.RoleBinding.yaml | 2 +- .../objects/che-operator.ServiceAccount.yaml | 2 +- ....eclipse.che.CustomResourceDefinition.yaml | 2 +- .../objects/eclipse-che-edit.ClusterRole.yaml | 2 +- .../objects/eclipse-che-view.ClusterRole.yaml | 2 +- .../objects/eclipse-che.Namespace.yaml | 2 +- ...ipse.che.MutatingWebhookConfiguration.yaml | 2 +- ...se.che.ValidatingWebhookConfiguration.yaml | 2 +- go.mod | 9 +- go.sum | 18 +- hack/boilerplate.go.txt | 2 +- hack/license-header.txt | 2 +- ....eclipse.che.CustomResourceDefinition.yaml | 2 +- ...che-operator-selfsigned-issuer.Issuer.yaml | 2 +- .../che-operator-service.Service.yaml | 2 +- ...che-operator-serving-cert.Certificate.yaml | 2 +- .../templates/che-operator.ClusterRole.yaml | 17 +- .../che-operator.ClusterRoleBinding.yaml | 2 +- .../templates/che-operator.Deployment.yaml | 2 +- .../next/templates/che-operator.Role.yaml | 2 +- .../templates/che-operator.RoleBinding.yaml | 2 +- .../che-operator.ServiceAccount.yaml | 2 +- .../eclipse-che-edit.ClusterRole.yaml | 2 +- .../eclipse-che-view.ClusterRole.yaml | 2 +- ...ipse.che.MutatingWebhookConfiguration.yaml | 2 +- ...se.che.ValidatingWebhookConfiguration.yaml | 2 +- .../next/templates/org_v2_checluster.yaml | 2 +- pkg/common/constants/constants.go | 1 + pkg/common/diffs/diffs.go | 5 + pkg/common/infrastructure/cluster.go | 26 +- pkg/common/k8s-client/k8s_client_types.go | 7 + pkg/common/test/test-client/test_client.go | 14 +- .../metrics/cheserver_prometheus_resources.go | 144 + .../metrics/dwo_prometheus_resources.go | 161 + pkg/deploy/metrics/init_test.go | 26 + pkg/deploy/metrics/metrics.go | 295 ++ pkg/deploy/metrics/metrics_test.go | 116 + .../metrics/prometheus_resources_utils.go | 66 + .../pkg/apis/monitoring/LICENSE | 201 + .../pkg/apis/monitoring/register.go | 25 + .../pkg/apis/monitoring/resource.go | 95 + 
.../apis/monitoring/v1/alertmanager_types.go | 740 +++ .../pkg/apis/monitoring/v1/dns_types.go | 83 + .../pkg/apis/monitoring/v1/doc.go | 18 + .../pkg/apis/monitoring/v1/http_config.go | 130 + .../apis/monitoring/v1/podmonitor_types.go | 322 ++ .../pkg/apis/monitoring/v1/probe_types.go | 296 ++ .../apis/monitoring/v1/prometheus_types.go | 2561 ++++++++++ .../monitoring/v1/prometheusrule_types.go | 163 + .../pkg/apis/monitoring/v1/register.go | 67 + .../monitoring/v1/servicemonitor_types.go | 228 + .../pkg/apis/monitoring/v1/thanos_types.go | 587 +++ .../pkg/apis/monitoring/v1/types.go | 1127 +++++ .../monitoring/v1/zz_generated.deepcopy.go | 4243 +++++++++++++++++ vendor/k8s.io/klog/v2/README.md | 2 - .../klog/v2/internal/serialize/keyvalues.go | 232 +- .../internal/serialize/keyvalues_no_slog.go | 10 +- .../v2/internal/serialize/keyvalues_slog.go | 12 +- vendor/k8s.io/klog/v2/klog.go | 87 +- vendor/k8s.io/klog/v2/klogr.go | 4 +- vendor/k8s.io/klog/v2/klogr_slog.go | 11 +- vendor/modules.txt | 14 +- 101 files changed, 12151 insertions(+), 248 deletions(-) create mode 100644 pkg/deploy/metrics/cheserver_prometheus_resources.go create mode 100644 pkg/deploy/metrics/dwo_prometheus_resources.go create mode 100644 pkg/deploy/metrics/init_test.go create mode 100644 pkg/deploy/metrics/metrics.go create mode 100644 pkg/deploy/metrics/metrics_test.go create mode 100644 pkg/deploy/metrics/prometheus_resources_utils.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/resource.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/dns_types.go create 
mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/doc.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/http_config.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheusrule_types.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/register.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/servicemonitor_types.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 5eb13be5bc..8f25aa4246 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // -// Copyright (c) 2019-2024 Red Hat, Inc. +// Copyright (c) 2019-2026 Red Hat, Inc. 
// This program and the accompanying materials are made // available under the terms of the Eclipse Public License 2.0 // which is available at https://www.eclipse.org/legal/epl-2.0/ diff --git a/api/v2/checluster_types.go b/api/v2/checluster_types.go index 654f3ebe4f..f8f20edca7 100644 --- a/api/v2/checluster_types.go +++ b/api/v2/checluster_types.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2019-2023 Red Hat, Inc. +// Copyright (c) 2019-2026 Red Hat, Inc. // This program and the accompanying materials are made // available under the terms of the Eclipse Public License 2.0 // which is available at https://www.eclipse.org/legal/epl-2.0/ diff --git a/api/v2/zz_generated.deepcopy.go b/api/v2/zz_generated.deepcopy.go index 180e141dee..0c7bd0bcdb 100644 --- a/api/v2/zz_generated.deepcopy.go +++ b/api/v2/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // -// Copyright (c) 2019-2024 Red Hat, Inc. +// Copyright (c) 2019-2026 Red Hat, Inc. // This program and the accompanying materials are made // available under the terms of the Eclipse Public License 2.0 // which is available at https://www.eclipse.org/legal/epl-2.0/ diff --git a/build/scripts/clear-defined-test.sh b/build/scripts/clear-defined-test.sh index 18848e173b..01fb21c99c 100755 --- a/build/scripts/clear-defined-test.sh +++ b/build/scripts/clear-defined-test.sh @@ -140,17 +140,15 @@ declare -A ignored_paths=( ["go.podman.io/storage"]="Harvesting is in progress" ["golang.org/x/tools/go/packages/packagestest"]="Harvesting is in progress" ["google.golang.org/genproto"]="Harvesting is in progress" - ["k8s.io/api"]="Harvesting is in progress" ["k8s.io/apiextensions-apiserver"]="Harvesting is in progress" - ["k8s.io/apimachinery"]="Harvesting is in progress" ["k8s.io/apiserver"]="Harvesting is in progress" - ["k8s.io/client-go"]="Harvesting is in progress" ["k8s.io/code-generator"]="Harvesting is in progress" ["k8s.io/component-base"]="Harvesting is in progress" ["k8s.io/kms"]="Harvesting is in 
progress" ["k8s.io/kube-aggregator"]="Harvesting is in progress" - ["k8s.io/utils"]="Harvesting is in progress" + ["k8s.io/kube-openapi"]="Harvesting is in progress" ["sigs.k8s.io/controller-tools"]="Harvesting is in progress" + ["github.com/prometheus-operator/prometheus-operator"]="Harvesting is in progress" ) declare -A ignored_paths_licenses=( @@ -190,16 +188,10 @@ declare -A ignored_paths_licenses=( ["golang.org/x/tools/go/packages/packagestest"]="BSD-3-Clause" # https://github.com/googleapis/go-genproto?tab=Apache-2.0-1-ov-file ["google.golang.org/genproto"]="Apache-2.0" - # https://github.com/kubernetes/api?tab=Apache-2.0-1-ov-file - ["k8s.io/api"]="Apache-2.0" # https://github.com/kubernetes/apiextensions-apiserver?tab=Apache-2.0-1-ov-file ["k8s.io/apiextensions-apiserver"]="Apache-2.0" - # https://github.com/kubernetes/apimachinery?tab=Apache-2.0-1-ov-file - ["k8s.io/apimachinery"]="Apache-2.0" # https://github.com/kubernetes/apiserver?tab=Apache-2.0-1-ov-file ["k8s.io/apiserver"]="Apache-2.0" - # https://github.com/kubernetes/client-go?tab=Apache-2.0-1-ov-file - ["k8s.io/client-go"]="Apache-2.0" # https://github.com/kubernetes/code-generator?tab=Apache-2.0-1-ov-file ["k8s.io/code-generator"]="Apache-2.0" # https://github.com/kubernetes/component-base?tab=Apache-2.0-1-ov-file @@ -208,10 +200,12 @@ declare -A ignored_paths_licenses=( ["k8s.io/kms"]="Apache-2.0" # https://github.com/kubernetes/kube-aggregator?tab=Apache-2.0-1-ov-file ["k8s.io/kube-aggregator"]="Apache-2.0" - # https://github.com/kubernetes/utils?tab=Apache-2.0-1-ov-file - ["k8s.io/utils"]="Apache-2.0" + # https://github.com/kubernetes/kube-openapi?tab=Apache-2.0-1-ov-file + ["k8s.io/kube-openapi"]="Apache-2.0" # https://github.com/kubernetes-sigs/controller-tools?tab=Apache-2.0-1-ov-file ["sigs.k8s.io/controller-tools"]="Apache-2.0" + # https://github.com/prometheus-operator/prometheus-operator?tab=Apache-2.0-1-ov-file + ["github.com/prometheus-operator/prometheus-operator"]="Apache-2.0" ) 
declare -A declared_licenses=( diff --git a/build/scripts/docker-run.sh b/build/scripts/docker-run.sh index 80fce64ff1..9de26f0109 100755 --- a/build/scripts/docker-run.sh +++ b/build/scripts/docker-run.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright (c) 2019-2023 Red Hat, Inc. +# Copyright (c) 2019-2026 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ @@ -29,7 +29,7 @@ init() { build() { printf "%bBuilding image %b${IMAGE_NAME}${NC}..." "${BOLD}" "${BLUE}" if docker build -t ${IMAGE_NAME} > docker-build-log 2>&1 -<`. + // The Secrets are mounted into `/etc/alertmanager/secrets/` in the 'alertmanager' container. + // +optional + Secrets []string `json:"secrets,omitempty"` + // configMaps defines a list of ConfigMaps in the same namespace as the Alertmanager + // object, which shall be mounted into the Alertmanager Pods. + // Each ConfigMap is added to the StatefulSet definition as a volume named `configmap-`. + // The ConfigMaps are mounted into `/etc/alertmanager/configmaps/` in the 'alertmanager' container. + // +optional + ConfigMaps []string `json:"configMaps,omitempty"` + // configSecret defines the name of a Kubernetes Secret in the same namespace as the + // Alertmanager object, which contains the configuration for this Alertmanager + // instance. If empty, it defaults to `alertmanager-`. + // + // The Alertmanager configuration should be available under the + // `alertmanager.yaml` key. Additional keys from the original secret are + // copied to the generated secret and mounted into the + // `/etc/alertmanager/config` directory in the `alertmanager` container. + // + // If either the secret or the `alertmanager.yaml` key is missing, the + // operator provisions a minimal Alertmanager configuration with one empty + // receiver (effectively dropping alert notifications). 
+ // +optional + ConfigSecret string `json:"configSecret,omitempty"` + // logLevel for Alertmanager to be configured with. + // +kubebuilder:validation:Enum="";debug;info;warn;error + // +optional + LogLevel string `json:"logLevel,omitempty"` + // logFormat for Alertmanager to be configured with. + // +kubebuilder:validation:Enum="";logfmt;json + // +optional + LogFormat string `json:"logFormat,omitempty"` + // replicas defines the expected size of the alertmanager cluster. The controller will + // eventually make the size of the running cluster equal to the expected + // size. + // +optional + Replicas *int32 `json:"replicas,omitempty"` + // retention defines the time duration Alertmanager shall retain data for. Default is '120h', + // and must match the regular expression `[0-9]+(ms|s|m|h)` (milliseconds seconds minutes hours). + // +kubebuilder:default:="120h" + // +optional + Retention GoDuration `json:"retention,omitempty"` + // storage defines the definition of how storage will be used by the Alertmanager + // instances. + // +optional + Storage *StorageSpec `json:"storage,omitempty"` + // volumes allows configuration of additional volumes on the output StatefulSet definition. + // Volumes specified will be appended to other volumes that are generated as a result of + // StorageSpec objects. + // +optional + Volumes []v1.Volume `json:"volumes,omitempty"` + // volumeMounts allows configuration of additional VolumeMounts on the output StatefulSet definition. + // VolumeMounts specified will be appended to other VolumeMounts in the alertmanager container, + // that are generated as a result of StorageSpec objects. + // +optional + VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"` + // persistentVolumeClaimRetentionPolicy controls if and how PVCs are deleted during the lifecycle of a StatefulSet. + // The default behavior is all PVCs are retained. + // This is an alpha field from kubernetes 1.23 until 1.26 and a beta field from 1.26. 
+ // It requires enabling the StatefulSetAutoDeletePVC feature gate. + // + // +optional + PersistentVolumeClaimRetentionPolicy *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty"` + // externalUrl defines the URL used to access the Alertmanager web service. This is + // necessary to generate correct URLs. This is necessary if Alertmanager is not + // served from root of a DNS name. + // +optional + ExternalURL string `json:"externalUrl,omitempty"` + // routePrefix Alertmanager registers HTTP handlers for. This is useful, + // if using ExternalURL and a proxy is rewriting HTTP routes of a request, + // and the actual ExternalURL is still true, but the server serves requests + // under a different route prefix. For example for use with `kubectl proxy`. + // +optional + RoutePrefix string `json:"routePrefix,omitempty"` + // paused if set to true all actions on the underlying managed objects are not + // going to be performed, except for delete actions. + // +optional + Paused bool `json:"paused,omitempty"` + // nodeSelector defines which Nodes the Pods are scheduled on. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // resources defines the resource requests and limits of the Pods. + // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` + // affinity defines the pod's scheduling constraints. + // +optional + Affinity *v1.Affinity `json:"affinity,omitempty"` + // tolerations defines the pod's tolerations. + // +optional + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // topologySpreadConstraints defines the Pod's topology spread constraints. + // +optional + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + // securityContext holds pod-level security attributes and common container settings. + // This defaults to the default PodSecurityContext. 
+ // +optional + SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` + // dnsPolicy defines the DNS policy for the pods. + // + // +optional + DNSPolicy *DNSPolicy `json:"dnsPolicy,omitempty"` + // dnsConfig defines the DNS configuration for the pods. + // + // +optional + DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty"` + // enableServiceLinks defines whether information about services should be injected into pod's environment variables + // +optional + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"` + // serviceName defines the service name used by the underlying StatefulSet(s) as the governing service. + // If defined, the Service must be created before the Alertmanager resource in the same namespace and it must define a selector that matches the pod labels. + // If empty, the operator will create and manage a headless service named `alertmanager-operated` for Alertmanager resources. + // When deploying multiple Alertmanager resources in the same namespace, it is recommended to specify a different value for each. + // See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-network-id for more details. + // +optional + // +kubebuilder:validation:MinLength=1 + ServiceName *string `json:"serviceName,omitempty"` + // serviceAccountName is the name of the ServiceAccount to use to run the + // Prometheus Pods. + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + // listenLocal defines the Alertmanager server listen on loopback, so that it + // does not bind against the Pod IP. Note this is only for the Alertmanager + // UI, not the gossip communication. + // +optional + ListenLocal bool `json:"listenLocal,omitempty"` + // containers allows injecting additional containers. This is meant to + // allow adding an authentication proxy to an Alertmanager pod. 
+ // Containers described here modify an operator generated container if they + // share the same name and modifications are done via a strategic merge + // patch. The current container names are: `alertmanager` and + // `config-reloader`. Overriding containers is entirely outside the scope + // of what the maintainers will support and by doing so, you accept that + // this behaviour may break at any time without notice. + // +optional + Containers []v1.Container `json:"containers,omitempty"` + // initContainers allows adding initContainers to the pod definition. Those can be used to e.g. + // fetch secrets for injection into the Alertmanager configuration from external sources. Any + // errors during the execution of an initContainer will lead to a restart of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // InitContainers described here modify an operator + // generated init containers if they share the same name and modifications are + // done via a strategic merge patch. The current init container name is: + // `init-config-reloader`. Overriding init containers is entirely outside the + // scope of what the maintainers will support and by doing so, you accept that + // this behaviour may break at any time without notice. + // +optional + InitContainers []v1.Container `json:"initContainers,omitempty"` + // priorityClassName assigned to the Pods + // +optional + PriorityClassName string `json:"priorityClassName,omitempty"` + // additionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. + // +optional + AdditionalPeers []string `json:"additionalPeers,omitempty"` + // clusterAdvertiseAddress defines the explicit address to advertise in cluster. + // Needs to be provided for non RFC1918 [1] (public) addresses. 
+ // [1] RFC1918: https://tools.ietf.org/html/rfc1918 + // +optional + ClusterAdvertiseAddress string `json:"clusterAdvertiseAddress,omitempty"` + // clusterGossipInterval defines the interval between gossip attempts. + // +optional + ClusterGossipInterval GoDuration `json:"clusterGossipInterval,omitempty"` + // clusterLabel defines the identifier that uniquely identifies the Alertmanager cluster. + // You should only set it when the Alertmanager cluster includes Alertmanager instances which are external to this Alertmanager resource. In practice, the addresses of the external instances are provided via the `.spec.additionalPeers` field. + // +optional + ClusterLabel *string `json:"clusterLabel,omitempty"` + // clusterPushpullInterval defines the interval between pushpull attempts. + // +optional + ClusterPushpullInterval GoDuration `json:"clusterPushpullInterval,omitempty"` + // clusterPeerTimeout defines the timeout for cluster peering. + // +optional + ClusterPeerTimeout GoDuration `json:"clusterPeerTimeout,omitempty"` + // portName defines the port's name for the pods and governing service. + // Defaults to `web`. + // +kubebuilder:default:="web" + // +optional + PortName string `json:"portName,omitempty"` + // forceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica. + // Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each. + // +optional + ForceEnableClusterMode bool `json:"forceEnableClusterMode,omitempty"` + // alertmanagerConfigSelector defines the selector to be used for to merge and configure Alertmanager with. + // +optional + AlertmanagerConfigSelector *metav1.LabelSelector `json:"alertmanagerConfigSelector,omitempty"` + // alertmanagerConfigNamespaceSelector defines the namespaces to be selected for AlertmanagerConfig discovery. If nil, only + // check own namespace. 
+ // +optional + AlertmanagerConfigNamespaceSelector *metav1.LabelSelector `json:"alertmanagerConfigNamespaceSelector,omitempty"` + + // alertmanagerConfigMatcherStrategy defines how AlertmanagerConfig objects + // process incoming alerts. + // +optional + AlertmanagerConfigMatcherStrategy AlertmanagerConfigMatcherStrategy `json:"alertmanagerConfigMatcherStrategy,omitempty"` + + // minReadySeconds defines the minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing for it to be considered available. + // + // If unset, pods will be considered available as soon as they are ready. + // + // +kubebuilder:validation:Minimum:=0 + // +optional + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + // hostAliases Pods configuration + // +listType=map + // +listMapKey=ip + // +optional + HostAliases []HostAlias `json:"hostAliases,omitempty"` + // web defines the web command line flags when starting Alertmanager. + // +optional + Web *AlertmanagerWebSpec `json:"web,omitempty"` + // limits defines the limits command line flags when starting Alertmanager. + // +optional + Limits *AlertmanagerLimitsSpec `json:"limits,omitempty"` + // clusterTLS defines the mutual TLS configuration for the Alertmanager cluster's gossip protocol. + // + // It requires Alertmanager >= 0.24.0. + // +optional + ClusterTLS *ClusterTLSConfig `json:"clusterTLS,omitempty"` + // alertmanagerConfiguration defines the configuration of Alertmanager. + // + // If defined, it takes precedence over the `configSecret` field. + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + // +optional + AlertmanagerConfiguration *AlertmanagerConfiguration `json:"alertmanagerConfiguration,omitempty"` + // automountServiceAccountToken defines whether a service account token should be automatically mounted in the pod. 
+ // If the service account has `automountServiceAccountToken: true`, set the field to `false` to opt out of automounting API credentials. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` + // enableFeatures defines the Alertmanager's feature flags. By default, no features are enabled. + // Enabling features which are disabled by default is entirely outside the + // scope of what the maintainers will support and by doing so, you accept + // that this behaviour may break at any time without notice. + // + // It requires Alertmanager >= 0.27.0. + // +optional + EnableFeatures []string `json:"enableFeatures,omitempty"` + // additionalArgs allows setting additional arguments for the 'Alertmanager' container. + // It is intended for e.g. activating hidden flags which are not supported by + // the dedicated configuration options yet. The arguments are passed as-is to the + // Alertmanager container which may cause issues if they are invalid or not supported + // by the given Alertmanager version. + // +optional + AdditionalArgs []Argument `json:"additionalArgs,omitempty"` + + // terminationGracePeriodSeconds defines the Optional duration in seconds the pod needs to terminate gracefully. + // Value must be non-negative integer. The value zero indicates stop immediately via + // the kill signal (no opportunity to shut down) which may lead to data corruption. + // + // Defaults to 120 seconds. + // + // +kubebuilder:validation:Minimum:=0 + // +optional + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` + + // hostUsers supports the user space in Kubernetes. + // + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/user-namespaces/ + // + // + // The feature requires at least Kubernetes 1.28 with the `UserNamespacesSupport` feature gate enabled. + // Starting Kubernetes 1.33, the feature is enabled by default. 
+ // + // +optional + HostUsers *bool `json:"hostUsers,omitempty"` +} + +type AlertmanagerConfigMatcherStrategy struct { + // type defines the strategy used by + // AlertmanagerConfig objects to match alerts in the routes and inhibition + // rules. + // + // The default value is `OnNamespace`. + // + // +kubebuilder:validation:Enum="OnNamespace";"OnNamespaceExceptForAlertmanagerNamespace";"None" + // +kubebuilder:default:="OnNamespace" + // +optional + Type AlertmanagerConfigMatcherStrategyType `json:"type,omitempty"` +} + +type AlertmanagerConfigMatcherStrategyType string + +const ( + // With `OnNamespace`, the route and inhibition rules of an + // AlertmanagerConfig object only process alerts that have a `namespace` + // label equal to the namespace of the object. + OnNamespaceConfigMatcherStrategyType AlertmanagerConfigMatcherStrategyType = "OnNamespace" + + // With `OnNamespaceExceptForAlertmanagerNamespace`, the route and inhibition rules of an + // AlertmanagerConfig object only process alerts that have a `namespace` + // label equal to the namespace of the object, unless the AlertmanagerConfig object + // is in the same namespace as the Alertmanager object, where it will process all alerts. + OnNamespaceExceptForAlertmanagerNamespaceConfigMatcherStrategyType AlertmanagerConfigMatcherStrategyType = "OnNamespaceExceptForAlertmanagerNamespace" + + // With `None`, the route and inhibition rules of an AlertmanagerConfig + // object process all incoming alerts. + NoneConfigMatcherStrategyType AlertmanagerConfigMatcherStrategyType = "None" +) + +// AlertmanagerConfiguration defines the Alertmanager configuration. +// +k8s:openapi-gen=true +type AlertmanagerConfiguration struct { + // name defines the name of the AlertmanagerConfig custom resource which is used to generate the Alertmanager configuration. + // It must be defined in the same namespace as the Alertmanager object. + // The operator will not enforce a `namespace` label for routes and inhibition rules. 
+ // +kubebuilder:validation:MinLength=1 + // +optional + Name string `json:"name,omitempty"` + // global defines the global parameters of the Alertmanager configuration. + // +optional + Global *AlertmanagerGlobalConfig `json:"global,omitempty"` + // templates defines the custom notification templates. + // +optional + Templates []SecretOrConfigMap `json:"templates,omitempty"` +} + +// AlertmanagerGlobalConfig configures parameters that are valid in all other configuration contexts. +// See https://prometheus.io/docs/alerting/latest/configuration/#configuration-file +type AlertmanagerGlobalConfig struct { + // smtp defines global SMTP parameters. + // +optional + SMTPConfig *GlobalSMTPConfig `json:"smtp,omitempty"` + + // resolveTimeout defines the default value used by alertmanager if the alert does + // not include EndsAt, after this time passes it can declare the alert as resolved if it has not been updated. + // This has no impact on alerts from Prometheus, as they always include EndsAt. + // +optional + ResolveTimeout Duration `json:"resolveTimeout,omitempty"` + + // httpConfig defines the default HTTP configuration. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` + + // slackApiUrl defines the default Slack API URL. + // +optional + SlackAPIURL *v1.SecretKeySelector `json:"slackApiUrl,omitempty"` + + // opsGenieApiUrl defines the default OpsGenie API URL. + // +optional + OpsGenieAPIURL *v1.SecretKeySelector `json:"opsGenieApiUrl,omitempty"` + + // opsGenieApiKey defines the default OpsGenie API Key. + // +optional + OpsGenieAPIKey *v1.SecretKeySelector `json:"opsGenieApiKey,omitempty"` + + // pagerdutyUrl defines the default Pagerduty URL. + // +optional + PagerdutyURL *URL `json:"pagerdutyUrl,omitempty"` + + // telegram defines the default Telegram config + // +optional + TelegramConfig *GlobalTelegramConfig `json:"telegram,omitempty"` + + // jira defines the default configuration for Jira. 
+ // +optional + JiraConfig *GlobalJiraConfig `json:"jira,omitempty"` + + // victorops defines the default configuration for VictorOps. + // +optional + VictorOpsConfig *GlobalVictorOpsConfig `json:"victorops,omitempty"` + + // rocketChat defines the default configuration for Rocket Chat. + // +optional + RocketChatConfig *GlobalRocketChatConfig `json:"rocketChat,omitempty"` + + // webex defines the default configuration for Jira. + // +optional + WebexConfig *GlobalWebexConfig `json:"webex,omitempty"` + + // wechat defines the default WeChat Config + // +optional + WeChatConfig *GlobalWeChatConfig `json:"wechat,omitempty"` +} + +// AlertmanagerStatus is the most recent observed status of the Alertmanager cluster. Read-only. +// More info: +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +// +k8s:openapi-gen=true +type AlertmanagerStatus struct { + // paused defines whether any actions on the underlying managed objects are + // being performed. Only delete actions will be performed. + // +optional + Paused bool `json:"paused"` + // replicas defines the total number of non-terminated pods targeted by this Alertmanager + // object (their labels match the selector). + // +optional + Replicas int32 `json:"replicas"` + // updatedReplicas defines the total number of non-terminated pods targeted by this Alertmanager + // object that have the desired version spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas"` + // availableReplicas defines the total number of available pods (ready for at least minReadySeconds) + // targeted by this Alertmanager cluster. + // +optional + AvailableReplicas int32 `json:"availableReplicas"` + // unavailableReplicas defines the total number of unavailable pods targeted by this Alertmanager object. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas"` + // selector used to match the pods targeted by this Alertmanager object. 
+ // +optional + Selector string `json:"selector,omitempty"` + // conditions defines the current state of the Alertmanager object. + // +listType=map + // +listMapKey=type + // +optional + Conditions []Condition `json:"conditions,omitempty"` +} + +func (a *Alertmanager) ExpectedReplicas() int { + if a.Spec.Replicas == nil { + return 1 + } + return int(*a.Spec.Replicas) +} + +func (a *Alertmanager) SetReplicas(i int) { a.Status.Replicas = int32(i) } +func (a *Alertmanager) SetUpdatedReplicas(i int) { a.Status.UpdatedReplicas = int32(i) } +func (a *Alertmanager) SetAvailableReplicas(i int) { a.Status.AvailableReplicas = int32(i) } +func (a *Alertmanager) SetUnavailableReplicas(i int) { a.Status.UnavailableReplicas = int32(i) } + +// AlertmanagerWebSpec defines the web command line flags when starting Alertmanager. +// +k8s:openapi-gen=true +type AlertmanagerWebSpec struct { + WebConfigFileFields `json:",inline"` + // getConcurrency defines the maximum number of GET requests processed concurrently. This corresponds to the + // Alertmanager's `--web.get-concurrency` flag. + // +optional + GetConcurrency *uint32 `json:"getConcurrency,omitempty"` + // timeout for HTTP requests. This corresponds to the Alertmanager's + // `--web.timeout` flag. + // +optional + Timeout *uint32 `json:"timeout,omitempty"` +} + +// AlertmanagerLimitsSpec defines the limits command line flags when starting Alertmanager. +// +k8s:openapi-gen=true +type AlertmanagerLimitsSpec struct { + // maxSilences defines the maximum number active and pending silences. This corresponds to the + // Alertmanager's `--silences.max-silences` flag. + // It requires Alertmanager >= v0.28.0. + // + // +kubebuilder:validation:Minimum:=0 + // +optional + MaxSilences *int32 `json:"maxSilences,omitempty"` + // maxPerSilenceBytes defines the maximum size of an individual silence as stored on disk. This corresponds to the Alertmanager's + // `--silences.max-per-silence-bytes` flag. 
+ // It requires Alertmanager >= v0.28.0. + // + // +optional + MaxPerSilenceBytes *ByteSize `json:"maxPerSilenceBytes,omitempty"` +} + +// GlobalSMTPConfig configures global SMTP parameters. +// See https://prometheus.io/docs/alerting/latest/configuration/#configuration-file +type GlobalSMTPConfig struct { + // from defines the default SMTP From header field. + // +optional + From *string `json:"from,omitempty"` + + // smartHost defines the default SMTP smarthost used for sending emails. + // +optional + SmartHost *HostPort `json:"smartHost,omitempty"` + + // hello defines the default hostname to identify to the SMTP server. + // +optional + Hello *string `json:"hello,omitempty"` + + // authUsername represents SMTP Auth using CRAM-MD5, LOGIN and PLAIN. If empty, Alertmanager doesn't authenticate to the SMTP server. + // +optional + AuthUsername *string `json:"authUsername,omitempty"` + + // authPassword represents SMTP Auth using LOGIN and PLAIN. + // +optional + AuthPassword *v1.SecretKeySelector `json:"authPassword,omitempty"` + + // authIdentity represents SMTP Auth using PLAIN + // +optional + AuthIdentity *string `json:"authIdentity,omitempty"` + + // authSecret represents SMTP Auth using CRAM-MD5. + // +optional + AuthSecret *v1.SecretKeySelector `json:"authSecret,omitempty"` + + // requireTLS defines the default SMTP TLS requirement. + // Note that Go does not support unencrypted connections to remote SMTP endpoints. + // +optional + RequireTLS *bool `json:"requireTLS,omitempty"` + + // tlsConfig defines the default TLS configuration for SMTP receivers + // +optional + TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` +} + +// GlobalTelegramConfig configures global Telegram parameters. +type GlobalTelegramConfig struct { + // apiURL defines he default Telegram API URL. + // + // It requires Alertmanager >= v0.24.0. + // +optional + APIURL *URL `json:"apiURL,omitempty"` +} + +// GlobalJiraConfig configures global Jira parameters. 
+type GlobalJiraConfig struct { + // apiURL defines the default Jira API URL. + // + // It requires Alertmanager >= v0.28.0. + // + // +optional + APIURL *URL `json:"apiURL,omitempty"` +} + +// GlobalRocketChatConfig configures global Rocket Chat parameters. +type GlobalRocketChatConfig struct { + // apiURL defines the default Rocket Chat API URL. + // + // It requires Alertmanager >= v0.28.0. + // + // +optional + APIURL *URL `json:"apiURL,omitempty"` + + // token defines the default Rocket Chat token. + // + // It requires Alertmanager >= v0.28.0. + // + // +optional + Token *v1.SecretKeySelector `json:"token,omitempty"` + + // tokenID defines the default Rocket Chat Token ID. + // + // It requires Alertmanager >= v0.28.0. + // + // +optional + TokenID *v1.SecretKeySelector `json:"tokenID,omitempty"` +} + +// GlobalWebexConfig configures global Webex parameters. +// See https://prometheus.io/docs/alerting/latest/configuration/#configuration-file +type GlobalWebexConfig struct { + // apiURL defines the is the default Webex API URL. + // + // It requires Alertmanager >= v0.25.0. + // + // +optional + APIURL *URL `json:"apiURL,omitempty"` +} + +type GlobalWeChatConfig struct { + // apiURL defines he default WeChat API URL. + // The default value is "https://qyapi.weixin.qq.com/cgi-bin/" + // +optional + APIURL *URL `json:"apiURL,omitempty"` + + // apiSecret defines the default WeChat API Secret. + // +optional + APISecret *v1.SecretKeySelector `json:"apiSecret,omitempty"` + + // apiCorpID defines the default WeChat API Corporate ID. + // +optional + // +kubebuilder:validation:MinLength=1 + APICorpID *string `json:"apiCorpID,omitempty"` +} + +// GlobalVictorOpsConfig configures global VictorOps parameters. +type GlobalVictorOpsConfig struct { + // apiURL defines the default VictorOps API URL. + // + // +optional + APIURL *URL `json:"apiURL,omitempty"` + // apiKey defines the default VictorOps API Key. 
+ // + // +optional + APIKey *v1.SecretKeySelector `json:"apiKey,omitempty"` +} + +// HostPort represents a "host:port" network address. +type HostPort struct { + // host defines the host's address, it can be a DNS name or a literal IP address. + // +kubebuilder:validation:MinLength=1 + // +required + Host string `json:"host"` + // port defines the host's port, it can be a literal port number or a port name. + // +kubebuilder:validation:MinLength=1 + // +required + Port string `json:"port"` +} + +// AlertmanagerList is a list of Alertmanagers. +// +k8s:openapi-gen=true +type AlertmanagerList struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ListMeta as metadata for collection responses. + metav1.ListMeta `json:"metadata,omitempty"` + // List of Alertmanagers + Items []Alertmanager `json:"items"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *AlertmanagerList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// ClusterTLSConfig defines the mutual TLS configuration for the Alertmanager cluster TLS protocol. +// +k8s:openapi-gen=true +type ClusterTLSConfig struct { + // server defines the server-side configuration for mutual TLS. + // +required + ServerTLS WebTLSConfig `json:"server"` + // client defines the client-side configuration for mutual TLS. 
+ // +required + ClientTLS SafeTLSConfig `json:"client"` +} + +// URL represents a valid URL +// +kubebuilder:validation:Pattern:="^(http|https)://.+$" +type URL string diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/dns_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/dns_types.go new file mode 100644 index 0000000000..d68b698312 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/dns_types.go @@ -0,0 +1,83 @@ +// Copyright 2024 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package v1 + +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. +type PodDNSConfig struct { + // nameservers defines the list of DNS name server IP addresses. + // This will be appended to the base nameservers generated from DNSPolicy. + // +optional + // +listType:=set + // +kubebuilder:validation:items:MinLength:=1 + Nameservers []string `json:"nameservers,omitempty"` + + // searches defines the list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // +optional + // +listType:=set + // +kubebuilder:validation:items:MinLength:=1 + Searches []string `json:"searches,omitempty"` + + // options defines the list of DNS resolver options. 
+ // This will be merged with the base options generated from DNSPolicy. + // Resolution options given in Options + // will override those that appear in the base DNSPolicy. + // +optional + // +listType=map + // +listMapKey=name + Options []PodDNSConfigOption `json:"options,omitempty"` +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +type PodDNSConfigOption struct { + // name is required and must be unique. + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + + // value is optional. + // +optional + Value *string `json:"value,omitempty"` +} + +// DNSPolicy specifies the DNS policy for the pod. +// +kubebuilder:validation:Enum=ClusterFirstWithHostNet;ClusterFirst;Default;None +type DNSPolicy string + +const ( + // DNSClusterFirstWithHostNet defines that the pod should use cluster DNS + // first, if it is available, then fall back on the default + // (as determined by kubelet) DNS settings. + DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" + + // DNSClusterFirst defines that the pod should use cluster DNS + // first unless hostNetwork is true, if it is available, then + // fall back on the default (as determined by kubelet) DNS settings. + DNSClusterFirst DNSPolicy = "ClusterFirst" + + // DNSDefault defines that the pod should use the default (as + // determined by kubelet) DNS settings. + DNSDefault DNSPolicy = "Default" + + // DNSNone defines that the pod should use empty DNS settings. DNS + // parameters such as nameservers and search paths should be defined via + // DNSConfig. + DNSNone DNSPolicy = "None" +) + +const ( +// DefaultTerminationGracePeriodSeconds indicates the default duration in +// seconds a pod needs to terminate gracefully. 
+) diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/doc.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/doc.go new file mode 100644 index 0000000000..64c4725273 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/doc.go @@ -0,0 +1,18 @@ +// Copyright 2017 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +k8s:deepcopy-gen=package +// +groupName=monitoring.coreos.com + +package v1 diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/http_config.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/http_config.go new file mode 100644 index 0000000000..3d7a0b8230 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/http_config.go @@ -0,0 +1,130 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "errors" + "fmt" + + v1 "k8s.io/api/core/v1" +) + +// HTTPConfig defines the configuration for the HTTP client. +type HTTPConfig struct { + // authorization configures the Authorization header credentials used by + // the client. + // + // Cannot be set at the same time as `basicAuth`, `bearerTokenSecret` or `oauth2`. + // + // +optional + Authorization *SafeAuthorization `json:"authorization,omitempty"` + + // basicAuth defines the Basic Authentication credentials used by the + // client. + // + // Cannot be set at the same time as `authorization`, `bearerTokenSecret` or `oauth2`. + // + // +optional + BasicAuth *BasicAuth `json:"basicAuth,omitempty"` + + // oauth2 defines the OAuth2 settings used by the client. + // + // It requires Prometheus >= 2.27.0. + // + // Cannot be set at the same time as `authorization`, `basicAuth` or `bearerTokenSecret`. + // + // +optional + OAuth2 *OAuth2 `json:"oauth2,omitempty"` + + // bearerTokenSecret defines a key of a Secret containing the bearer token + // used by the client for authentication. The secret needs to be in the + // same namespace as the custom resource and readable by the Prometheus + // Operator. + // + // Cannot be set at the same time as `authorization`, `basicAuth` or `oauth2`. + // + // +optional + // + // Deprecated: use `authorization` instead. + BearerTokenSecret *v1.SecretKeySelector `json:"bearerTokenSecret,omitempty"` + + // tlsConfig defines the TLS configuration used by the client. + // + // +optional + TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` + + ProxyConfig `json:",inline"` + + // followRedirects defines whether the client should follow HTTP 3xx + // redirects. + // + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` + + // enableHttp2 can be used to disable HTTP2. 
+ // + // +optional + EnableHTTP2 *bool `json:"enableHttp2,omitempty"` +} + +// Validate semantically validates the given HTTPConfig. +func (hc *HTTPConfig) Validate() error { + if hc == nil { + return nil + } + + // Check duplicate authentication methods. + switch { + case hc.Authorization != nil: + switch { + case hc.BasicAuth != nil: + return errors.New("authorization and basicAuth cannot be configured at the same time") + case hc.BearerTokenSecret != nil: + return errors.New("authorization and bearerTokenSecret cannot be configured at the same time") + case hc.OAuth2 != nil: + return errors.New("authorization and oauth2 cannot be configured at the same time") + } + case hc.BasicAuth != nil: + switch { + case hc.BearerTokenSecret != nil: + return errors.New("basicAuth and bearerTokenSecret cannot be configured at the same time") + case hc.OAuth2 != nil: + return errors.New("basicAuth and oauth2 cannot be configured at the same time") + } + case hc.BearerTokenSecret != nil: + switch { + case hc.OAuth2 != nil: + return errors.New("bearerTokenSecret and oauth2 cannot be configured at the same time") + } + } + + if err := hc.Authorization.Validate(); err != nil { + return fmt.Errorf("authorization: %w", err) + } + + if err := hc.OAuth2.Validate(); err != nil { + return fmt.Errorf("oauth2: %w", err) + } + + if err := hc.TLSConfig.Validate(); err != nil { + return fmt.Errorf("tlsConfig: %w", err) + } + + if err := hc.ProxyConfig.Validate(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go new file mode 100644 index 0000000000..58783ae828 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go @@ -0,0 +1,322 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the 
Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + PodMonitorsKind = "PodMonitor" + PodMonitorName = "podmonitors" + PodMonitorKindKey = "podmonitor" +) + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:resource:categories="prometheus-operator",shortName="pmon" +// +kubebuilder:subresource:status + +// The `PodMonitor` custom resource definition (CRD) defines how `Prometheus` and `PrometheusAgent` can scrape metrics from a group of pods. +// Among other things, it allows to specify: +// * The pods to scrape via label selectors. +// * The container ports to scrape. +// * Authentication credentials to use. +// * Target and metric relabeling. +// +// `Prometheus` and `PrometheusAgent` objects select `PodMonitor` objects using label and namespace selectors. +type PodMonitor struct { + // TypeMeta defines the versioned schema of this representation of an object. + // +optional + metav1.TypeMeta `json:",inline"` + // metadata defines ObjectMeta as the metadata that all persisted resources. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec defines the specification of desired Pod selection for target discovery by Prometheus. + // +required + Spec PodMonitorSpec `json:"spec"` + // status defines the status subresource. 
It is under active development and is updated only when the + // "StatusForConfigurationResources" feature gate is enabled. + // + // Most recent observed status of the PodMonitor. Read-only. + // More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ConfigResourceStatus `json:"status,omitempty,omitzero"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *PodMonitor) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +func (l *PodMonitor) Bindings() []WorkloadBinding { + return l.Status.Bindings +} + +// PodMonitorSpec contains specification parameters for a PodMonitor. +// +k8s:openapi-gen=true +type PodMonitorSpec struct { + // jobLabel defines the label to use to retrieve the job name from. + // `jobLabel` selects the label from the associated Kubernetes `Pod` + // object which will be used as the `job` label for all metrics. + // + // For example if `jobLabel` is set to `foo` and the Kubernetes `Pod` + // object is labeled with `foo: bar`, then Prometheus adds the `job="bar"` + // label to all ingested metrics. + // + // If the value of this field is empty, the `job` label of the metrics + // defaults to the namespace and name of the PodMonitor object (e.g. `/`). + // +optional + JobLabel string `json:"jobLabel,omitempty"` + + // podTargetLabels defines the labels which are transferred from the + // associated Kubernetes `Pod` object onto the ingested metrics. + // + // +optional + PodTargetLabels []string `json:"podTargetLabels,omitempty"` + + // podMetricsEndpoints defines how to scrape metrics from the selected pods. + // + // +optional + PodMetricsEndpoints []PodMetricsEndpoint `json:"podMetricsEndpoints"` + + // selector defines the label selector to select the Kubernetes `Pod` objects to scrape metrics from. 
+ // +required + Selector metav1.LabelSelector `json:"selector"` + + // selectorMechanism defines the mechanism used to select the endpoints to scrape. + // By default, the selection process relies on relabel configurations to filter the discovered targets. + // Alternatively, you can opt in for role selectors, which may offer better efficiency in large clusters. + // Which strategy is best for your use case needs to be carefully evaluated. + // + // It requires Prometheus >= v2.17.0. + // + // +optional + SelectorMechanism *SelectorMechanism `json:"selectorMechanism,omitempty"` + + // namespaceSelector defines in which namespace(s) Prometheus should discover the pods. + // By default, the pods are discovered in the same namespace as the `PodMonitor` object but it is possible to select pods across different/all namespaces. + // +optional + NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` + + // sampleLimit defines a per-scrape limit on the number of scraped samples + // that will be accepted. + // + // +optional + SampleLimit *uint64 `json:"sampleLimit,omitempty"` + + // targetLimit defines a limit on the number of scraped targets that will + // be accepted. + // + // +optional + TargetLimit *uint64 `json:"targetLimit,omitempty"` + + // scrapeProtocols defines the protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` + + // fallbackScrapeProtocol defines the protocol to use if a scrape returns blank, unparseable, or otherwise invalid Content-Type. + // + // It requires Prometheus >= v3.0.0. 
+ // +optional + FallbackScrapeProtocol *ScrapeProtocol `json:"fallbackScrapeProtocol,omitempty"` + + // labelLimit defines the per-scrape limit on number of labels that will be accepted for a sample. + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelLimit *uint64 `json:"labelLimit,omitempty"` + // labelNameLengthLimit defines the per-scrape limit on length of labels name that will be accepted for a sample. + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` + // labelValueLengthLimit defines the per-scrape limit on length of labels value that will be accepted for a sample. + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + + NativeHistogramConfig `json:",inline"` + + // keepDroppedTargets defines the per-scrape limit on the number of targets dropped by relabeling + // that will be kept in memory. 0 means no limit. + // + // It requires Prometheus >= v2.47.0. + // + // +optional + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + + // attachMetadata defines additional metadata which is added to the + // discovered targets. + // + // It requires Prometheus >= v2.35.0. + // + // +optional + AttachMetadata *AttachMetadata `json:"attachMetadata,omitempty"` + + // scrapeClass defines the scrape class to apply. + // +optional + // +kubebuilder:validation:MinLength=1 + ScrapeClassName *string `json:"scrapeClass,omitempty"` + + // bodySizeLimit when defined specifies a job level limit on the size + // of uncompressed response body that will be accepted by Prometheus. + // + // It requires Prometheus >= v2.28.0. + // + // +optional + BodySizeLimit *ByteSize `json:"bodySizeLimit,omitempty"` +} + +// PodMonitorList is a list of PodMonitors. 
+// +k8s:openapi-gen=true +type PodMonitorList struct { + // TypeMeta defines the versioned schema of this representation of an object. + // +optional + metav1.TypeMeta `json:",inline"` + // metadata defines ListMeta as metadata for collection responses. + metav1.ListMeta `json:"metadata,omitempty"` + // List of PodMonitors + Items []PodMonitor `json:"items"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *PodMonitorList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// PodMetricsEndpoint defines an endpoint serving Prometheus metrics to be scraped by +// Prometheus. +// +// +k8s:openapi-gen=true +type PodMetricsEndpoint struct { + // port defines the `Pod` port name which exposes the endpoint. + // + // It takes precedence over the `portNumber` and `targetPort` fields. + // +optional + Port *string `json:"port,omitempty"` + + // portNumber defines the `Pod` port number which exposes the endpoint. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +optional + PortNumber *int32 `json:"portNumber,omitempty"` + + // targetPort defines the name or number of the target port of the `Pod` object behind the Service, the + // port must be specified with container port property. + // + // Deprecated: use 'port' or 'portNumber' instead. + // +optional + TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` + + // path defines the HTTP path from which to scrape for metrics. + // + // If empty, Prometheus uses the default value (e.g. `/metrics`). + // +optional + Path string `json:"path,omitempty"` + + // scheme defines the HTTP scheme to use for scraping. + // + // `http` and `https` are the expected values unless you rewrite the + // `__scheme__` label via relabeling. + // + // If empty, Prometheus uses the default value `http`. + // + // +kubebuilder:validation:Enum=http;https + // +optional + Scheme string `json:"scheme,omitempty"` + + // params define optional HTTP URL parameters. 
+ // +optional + Params map[string][]string `json:"params,omitempty"` + + // interval at which Prometheus scrapes the metrics from the target. + // + // If empty, Prometheus uses the global scrape interval. + // +optional + Interval Duration `json:"interval,omitempty"` + + // scrapeTimeout defines the timeout after which Prometheus considers the scrape to be failed. + // + // If empty, Prometheus uses the global scrape timeout unless it is less + // than the target's scrape interval value in which the latter is used. + // The value cannot be greater than the scrape interval otherwise the operator will reject the resource. + // +optional + ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` + + // honorLabels when true preserves the metric's labels when they collide + // with the target's labels. + // +optional + HonorLabels bool `json:"honorLabels,omitempty"` + + // honorTimestamps defines whether Prometheus preserves the timestamps + // when exposed by the target. + // + // +optional + HonorTimestamps *bool `json:"honorTimestamps,omitempty"` + + // trackTimestampsStaleness defines whether Prometheus tracks staleness of + // the metrics that have an explicit timestamp present in scraped data. + // Has no effect if `honorTimestamps` is false. + // + // It requires Prometheus >= v2.48.0. + // + // +optional + TrackTimestampsStaleness *bool `json:"trackTimestampsStaleness,omitempty"` + + // metricRelabelings defines the relabeling rules to apply to the + // samples before ingestion. + // + // +optional + MetricRelabelConfigs []RelabelConfig `json:"metricRelabelings,omitempty"` + + // relabelings defines the relabeling rules to apply the target's + // metadata labels. + // + // The Operator automatically adds relabelings for a few standard Kubernetes fields. + // + // The original scrape job's name is available via the `__tmp_prometheus_job_name` label. 
+ // + // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + // + // +optional + RelabelConfigs []RelabelConfig `json:"relabelings,omitempty"` + + // filterRunning when true, the pods which are not running (e.g. either in Failed or + // Succeeded state) are dropped during the target discovery. + // + // If unset, the filtering is enabled. + // + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase + // + // +optional + FilterRunning *bool `json:"filterRunning,omitempty"` + + HTTPConfig `json:",inline"` +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go new file mode 100644 index 0000000000..0dabfe5e9b --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go @@ -0,0 +1,296 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + "errors" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + ProbesKind = "Probe" + ProbeName = "probes" + ProbeKindKey = "probe" +) + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:resource:categories="prometheus-operator",shortName="prb" +// +kubebuilder:subresource:status + +// The `Probe` custom resource definition (CRD) defines how to scrape metrics from prober exporters such as the [blackbox exporter](https://github.com/prometheus/blackbox_exporter). +// +// The `Probe` resource needs 2 pieces of information: +// * The list of probed addresses which can be defined statically or by discovering Kubernetes Ingress objects. +// * The prober which exposes the availability of probed endpoints (over various protocols such HTTP, TCP, ICMP, ...) as Prometheus metrics. +// +// `Prometheus` and `PrometheusAgent` objects select `Probe` objects using label and namespace selectors. +type Probe struct { + // TypeMeta defines the versioned schema of this representation of an object. + // +optional + metav1.TypeMeta `json:",inline"` + // metadata defines ObjectMeta as the metadata that all persisted resources. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec defines the specification of desired Ingress selection for target discovery by Prometheus. + // +required + Spec ProbeSpec `json:"spec"` + // status defines the status subresource. It is under active development and is updated only when the + // "StatusForConfigurationResources" feature gate is enabled. + // + // Most recent observed status of the Probe. Read-only. + // More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ConfigResourceStatus `json:"status,omitempty,omitzero"` +} + +// DeepCopyObject implements the runtime.Object interface. 
+func (l *Probe) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +func (l *Probe) Bindings() []WorkloadBinding { + return l.Status.Bindings +} + +// ProbeSpec contains specification parameters for a Probe. +// +k8s:openapi-gen=true +type ProbeSpec struct { + // jobName assigned to scraped metrics by default. + // +optional + JobName string `json:"jobName,omitempty"` + // prober defines the specification for the prober to use for probing targets. + // The prober.URL parameter is required. Targets cannot be probed if left empty. + // +optional + ProberSpec ProberSpec `json:"prober,omitempty"` + // module to use for probing specifying how to probe the target. + // Example module configuring in the blackbox exporter: + // https://github.com/prometheus/blackbox_exporter/blob/master/example.yml + // +optional + Module string `json:"module,omitempty"` + // targets defines a set of static or dynamically discovered targets to probe. + // +optional + Targets ProbeTargets `json:"targets,omitempty"` + // interval at which targets are probed using the configured prober. + // If not specified Prometheus' global scrape interval is used. + // +optional + Interval Duration `json:"interval,omitempty"` + // scrapeTimeout defines the timeout for scraping metrics from the Prometheus exporter. + // If not specified, the Prometheus global scrape timeout is used. + // The value cannot be greater than the scrape interval otherwise the operator will reject the resource. + // +optional + ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` + // tlsConfig defines the TLS configuration to use when scraping the endpoint. + // +optional + TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` + // bearerTokenSecret defines the secret to mount to read bearer token for scraping targets. The secret + // needs to be in the same namespace as the probe and accessible by + // the Prometheus Operator. 
+ // +optional + BearerTokenSecret v1.SecretKeySelector `json:"bearerTokenSecret,omitempty"` + // basicAuth allow an endpoint to authenticate over basic authentication. + // More info: https://prometheus.io/docs/operating/configuration/#endpoint + // +optional + BasicAuth *BasicAuth `json:"basicAuth,omitempty"` + // oauth2 for the URL. Only valid in Prometheus versions 2.27.0 and newer. + // +optional + OAuth2 *OAuth2 `json:"oauth2,omitempty"` + // metricRelabelings defines the RelabelConfig to apply to samples before ingestion. + // +optional + MetricRelabelConfigs []RelabelConfig `json:"metricRelabelings,omitempty"` + // authorization section for this endpoint + // +optional + Authorization *SafeAuthorization `json:"authorization,omitempty"` + // sampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + // +optional + SampleLimit *uint64 `json:"sampleLimit,omitempty"` + // targetLimit defines a limit on the number of scraped targets that will be accepted. + // +optional + TargetLimit *uint64 `json:"targetLimit,omitempty"` + // scrapeProtocols defines the protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` + // fallbackScrapeProtocol defines the protocol to use if a scrape returns blank, unparseable, or otherwise invalid Content-Type. + // + // It requires Prometheus >= v3.0.0. + // +optional + FallbackScrapeProtocol *ScrapeProtocol `json:"fallbackScrapeProtocol,omitempty"` + // labelLimit defines the per-scrape limit on number of labels that will be accepted for a sample. + // Only valid in Prometheus versions 2.27.0 and newer. 
+ // +optional + LabelLimit *uint64 `json:"labelLimit,omitempty"` + // labelNameLengthLimit defines the per-scrape limit on length of labels name that will be accepted for a sample. + // Only valid in Prometheus versions 2.27.0 and newer. + // +optional + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` + // labelValueLengthLimit defines the per-scrape limit on length of labels value that will be accepted for a sample. + // Only valid in Prometheus versions 2.27.0 and newer. + // +optional + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + + // +optional + NativeHistogramConfig `json:",inline"` + // keepDroppedTargets defines the per-scrape limit on the number of targets dropped by relabeling + // that will be kept in memory. 0 means no limit. + // + // It requires Prometheus >= v2.47.0. + // + // +optional + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + + // scrapeClass defines the scrape class to apply. + // +optional + // +kubebuilder:validation:MinLength=1 + ScrapeClassName *string `json:"scrapeClass,omitempty"` + + // params defines the list of HTTP query parameters for the scrape. + // Please note that the `.spec.module` field takes precedence over the `module` parameter from this list when both are defined. + // The module name must be added using Module under ProbeSpec. + // +optional + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=name + Params []ProbeParam `json:"params,omitempty"` +} + +// ProbeParam defines specification of extra parameters for a Probe. 
+// +k8s:openapi-gen=true +type ProbeParam struct { + // name defines the parameter name + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name,omitempty"` + // values defines the parameter values + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:items:MinLength=1 + // +optional + Values []string `json:"values,omitempty"` +} + +// ProbeTargets defines how to discover the probed targets. +// One of the `staticConfig` or `ingress` must be defined. +// If both are defined, `staticConfig` takes precedence. +// +k8s:openapi-gen=true +type ProbeTargets struct { + // staticConfig defines the static list of targets to probe and the + // relabeling configuration. + // If `ingress` is also defined, `staticConfig` takes precedence. + // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config. + // +optional + StaticConfig *ProbeTargetStaticConfig `json:"staticConfig,omitempty"` + // ingress defines the Ingress objects to probe and the relabeling + // configuration. + // If `staticConfig` is also defined, `staticConfig` takes precedence. + // +optional + Ingress *ProbeTargetIngress `json:"ingress,omitempty"` +} + +// Validate semantically validates the given ProbeTargets. +func (it *ProbeTargets) Validate() error { + if it.StaticConfig == nil && it.Ingress == nil { + return errors.New("at least one of .spec.targets.staticConfig and .spec.targets.ingress is required") + } + + return nil +} + +// ProbeTargetStaticConfig defines the set of static targets considered for probing. +// +k8s:openapi-gen=true +type ProbeTargetStaticConfig struct { + // static defines the list of hosts to probe. + // +optional + Targets []string `json:"static,omitempty"` + // labels defines all labels assigned to all metrics scraped from the targets. 
+ // +optional + Labels map[string]string `json:"labels,omitempty"` + // relabelingConfigs defines relabelings to be apply to the label set of the targets before it gets + // scraped. + // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + // +optional + RelabelConfigs []RelabelConfig `json:"relabelingConfigs,omitempty"` +} + +// ProbeTargetIngress defines the set of Ingress objects considered for probing. +// The operator configures a target for each host/path combination of each ingress object. +// +k8s:openapi-gen=true +type ProbeTargetIngress struct { + // selector to select the Ingress objects. + // +optional + Selector metav1.LabelSelector `json:"selector,omitempty"` + // namespaceSelector defines from which namespaces to select Ingress objects. + // +optional + NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` + // relabelingConfigs to apply to the label set of the target before it gets + // scraped. + // The original ingress address is available via the + // `__tmp_prometheus_ingress_address` label. It can be used to customize the + // probed URL. + // The original scrape job's name is available via the `__tmp_prometheus_job_name` label. + // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + // +optional + RelabelConfigs []RelabelConfig `json:"relabelingConfigs,omitempty"` +} + +// ProberSpec contains specification parameters for the Prober used for probing. +// +k8s:openapi-gen=true +type ProberSpec struct { + // url defines the mandatory URL of the prober. + // +required + URL string `json:"url"` + // scheme defines the HTTP scheme to use for scraping. + // `http` and `https` are the expected values unless you rewrite the `__scheme__` label via relabeling. + // If empty, Prometheus uses the default value `http`. 
+ // +kubebuilder:validation:Enum=http;https + // +optional + Scheme string `json:"scheme,omitempty"` + // path to collect metrics from. + // Defaults to `/probe`. + // +kubebuilder:default:="/probe" + // +optional + Path string `json:"path,omitempty"` + + // +optional + ProxyConfig `json:",inline"` +} + +// ProbeList is a list of Probes. +// +k8s:openapi-gen=true +type ProbeList struct { + // TypeMeta defines the versioned schema of this representation of an object. + // +optional + metav1.TypeMeta `json:",inline"` + // metadata defines ListMeta as metadata for collection responses. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // List of Probes + // +required + Items []Probe `json:"items"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *ProbeList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go new file mode 100644 index 0000000000..71427906ef --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go @@ -0,0 +1,2561 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + "errors" + "fmt" + "strings" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + PrometheusesKind = "Prometheus" + PrometheusName = "prometheuses" + PrometheusKindKey = "prometheus" +) + +// ScrapeProtocol represents a protocol used by Prometheus for scraping metrics. +// Supported values are: +// * `OpenMetricsText0.0.1` +// * `OpenMetricsText1.0.0` +// * `PrometheusProto` +// * `PrometheusText0.0.4` +// * `PrometheusText1.0.0` +// +kubebuilder:validation:Enum=PrometheusProto;OpenMetricsText0.0.1;OpenMetricsText1.0.0;PrometheusText0.0.4;PrometheusText1.0.0 +type ScrapeProtocol string + +const ( + PrometheusProto ScrapeProtocol = "PrometheusProto" + PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4" + PrometheusText1_0_0 ScrapeProtocol = "PrometheusText1.0.0" + OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1" + OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0" +) + +// RuntimeConfig configures the values for the process behavior. +type RuntimeConfig struct { + // goGC defines the Go garbage collection target percentage. Lowering this number may increase the CPU usage. + // See: https://tip.golang.org/doc/gc-guide#GOGC + // +optional + // +kubebuilder:validation:Minimum=-1 + GoGC *int32 `json:"goGC,omitempty"` +} + +// PrometheusInterface is used by Prometheus and PrometheusAgent to share common methods, e.g. config generation. 
+// +k8s:deepcopy-gen=false +type PrometheusInterface interface { + metav1.ObjectMetaAccessor + schema.ObjectKind + + GetCommonPrometheusFields() CommonPrometheusFields + SetCommonPrometheusFields(CommonPrometheusFields) + + GetStatus() PrometheusStatus +} + +var _ = PrometheusInterface(&Prometheus{}) + +func (l *Prometheus) GetCommonPrometheusFields() CommonPrometheusFields { + return l.Spec.CommonPrometheusFields +} + +func (l *Prometheus) SetCommonPrometheusFields(f CommonPrometheusFields) { + l.Spec.CommonPrometheusFields = f +} + +func (l *Prometheus) GetStatus() PrometheusStatus { + return l.Status +} + +// +kubebuilder:validation:Enum=OnResource;OnShard +type AdditionalLabelSelectors string + +const ( + // Automatically add a label selector that will select all pods matching the same Prometheus/PrometheusAgent resource (irrespective of their shards). + ResourceNameLabelSelector AdditionalLabelSelectors = "OnResource" + + // Automatically add a label selector that will select all pods matching the same shard. + ShardAndResourceNameLabelSelector AdditionalLabelSelectors = "OnShard" +) + +type CoreV1TopologySpreadConstraint v1.TopologySpreadConstraint + +type TopologySpreadConstraint struct { + CoreV1TopologySpreadConstraint `json:",inline"` + + // additionalLabelSelectors Defines what Prometheus Operator managed labels should be added to labelSelector on the topologySpreadConstraint. + // +optional + AdditionalLabelSelectors *AdditionalLabelSelectors `json:"additionalLabelSelectors,omitempty"` +} + +// +kubebuilder:validation:MinLength:=1 +type EnableFeature string + +// CommonPrometheusFields are the options available to both the Prometheus server and agent. +// +k8s:deepcopy-gen=true +type CommonPrometheusFields struct { + // podMetadata defines labels and annotations which are propagated to the Prometheus pods. + // + // The following items are reserved and cannot be overridden: + // * "prometheus" label, set to the name of the Prometheus object. 
+ // * "app.kubernetes.io/instance" label, set to the name of the Prometheus object. + // * "app.kubernetes.io/managed-by" label, set to "prometheus-operator". + // * "app.kubernetes.io/name" label, set to "prometheus". + // * "app.kubernetes.io/version" label, set to the Prometheus version. + // * "operator.prometheus.io/name" label, set to the name of the Prometheus object. + // * "operator.prometheus.io/shard" label, set to the shard number of the Prometheus object. + // * "kubectl.kubernetes.io/default-container" annotation, set to "prometheus". + // +optional + PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` + + // serviceMonitorSelector defines the serviceMonitors to be selected for target discovery. An empty label + // selector matches all objects. A null label selector matches no objects. + // + // If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` + // and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. + // The Prometheus operator will ensure that the Prometheus configuration's + // Secret exists, but it is the responsibility of the user to provide the raw + // gzipped Prometheus configuration under the `prometheus.yaml.gz` key. + // This behavior is *deprecated* and will be removed in the next major version + // of the custom resource definition. It is recommended to use + // `spec.additionalScrapeConfigs` instead. + // +optional + ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"` + // serviceMonitorNamespaceSelector defines the namespaces to match for ServicedMonitors discovery. An empty label selector + // matches all namespaces. A null label selector (default value) matches the current + // namespace only. + // +optional + ServiceMonitorNamespaceSelector *metav1.LabelSelector `json:"serviceMonitorNamespaceSelector,omitempty"` + + // podMonitorSelector defines the podMonitors to be selected for target discovery. 
An empty label selector + // matches all objects. A null label selector matches no objects. + // + // If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` + // and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. + // The Prometheus operator will ensure that the Prometheus configuration's + // Secret exists, but it is the responsibility of the user to provide the raw + // gzipped Prometheus configuration under the `prometheus.yaml.gz` key. + // This behavior is *deprecated* and will be removed in the next major version + // of the custom resource definition. It is recommended to use + // `spec.additionalScrapeConfigs` instead. + // +optional + PodMonitorSelector *metav1.LabelSelector `json:"podMonitorSelector,omitempty"` + // podMonitorNamespaceSelector defines the namespaces to match for PodMonitors discovery. An empty label selector + // matches all namespaces. A null label selector (default value) matches the current + // namespace only. + // +optional + PodMonitorNamespaceSelector *metav1.LabelSelector `json:"podMonitorNamespaceSelector,omitempty"` + + // probeSelector defines the probes to be selected for target discovery. An empty label selector + // matches all objects. A null label selector matches no objects. + // + // If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` + // and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. + // The Prometheus operator will ensure that the Prometheus configuration's + // Secret exists, but it is the responsibility of the user to provide the raw + // gzipped Prometheus configuration under the `prometheus.yaml.gz` key. + // This behavior is *deprecated* and will be removed in the next major version + // of the custom resource definition. It is recommended to use + // `spec.additionalScrapeConfigs` instead. 
+ // +optional + ProbeSelector *metav1.LabelSelector `json:"probeSelector,omitempty"` + // probeNamespaceSelector defines the namespaces to match for Probe discovery. An empty label + // selector matches all namespaces. A null label selector matches the + // current namespace only. + // +optional + ProbeNamespaceSelector *metav1.LabelSelector `json:"probeNamespaceSelector,omitempty"` + + // scrapeConfigSelector defines the scrapeConfigs to be selected for target discovery. An empty label + // selector matches all objects. A null label selector matches no objects. + // + // If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` + // and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. + // The Prometheus operator will ensure that the Prometheus configuration's + // Secret exists, but it is the responsibility of the user to provide the raw + // gzipped Prometheus configuration under the `prometheus.yaml.gz` key. + // This behavior is *deprecated* and will be removed in the next major version + // of the custom resource definition. It is recommended to use + // `spec.additionalScrapeConfigs` instead. + // + // Note that the ScrapeConfig custom resource definition is currently at Alpha level. + // + // +optional + ScrapeConfigSelector *metav1.LabelSelector `json:"scrapeConfigSelector,omitempty"` + // scrapeConfigNamespaceSelector defines the namespaces to match for ScrapeConfig discovery. An empty label selector + // matches all namespaces. A null label selector matches the current + // namespace only. + // + // Note that the ScrapeConfig custom resource definition is currently at Alpha level. + // + // +optional + ScrapeConfigNamespaceSelector *metav1.LabelSelector `json:"scrapeConfigNamespaceSelector,omitempty"` + + // version of Prometheus being deployed. The operator uses this information + // to generate the Prometheus StatefulSet + configuration files. 
+ // + // If not specified, the operator assumes the latest upstream version of + // Prometheus available at the time when the version of the operator was + // released. + // +optional + Version string `json:"version,omitempty"` + + // paused defines when a Prometheus deployment is paused, no actions except for deletion + // will be performed on the underlying objects. + // +optional + Paused bool `json:"paused,omitempty"` + + // image defines the container image name for Prometheus. If specified, it takes precedence + // over the `spec.baseImage`, `spec.tag` and `spec.sha` fields. + // + // Specifying `spec.version` is still necessary to ensure the Prometheus + // Operator knows which version of Prometheus is being configured. + // + // If neither `spec.image` nor `spec.baseImage` are defined, the operator + // will use the latest upstream version of Prometheus available at the time + // when the operator was released. + // + // +optional + Image *string `json:"image,omitempty"` + // imagePullPolicy defines the image pull policy for the 'prometheus', 'init-config-reloader' and 'config-reloader' containers. + // See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy for more details. + // +kubebuilder:validation:Enum="";Always;Never;IfNotPresent + // +optional + ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"` + // imagePullSecrets defines an optional list of references to Secrets in the same namespace + // to use for pulling images from registries. + // See http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + // +optional + ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + + // replicas defines the number of replicas of each shard to deploy for a Prometheus deployment. + // `spec.replicas` multiplied by `spec.shards` is the total number of Pods + // created. 
+ // + // Default: 1 + // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // shards defines the number of shards to distribute the scraped targets onto. + // + // `spec.replicas` multiplied by `spec.shards` is the total number of Pods + // being created. + // + // When not defined, the operator assumes only one shard. + // + // Note that scaling down shards will not reshard data onto the remaining + // instances, it must be manually moved. Increasing shards will not reshard + // data either but it will continue to be available from the same + // instances. To query globally, use either + // * Thanos sidecar + querier for query federation and Thanos Ruler for rules. + // * Remote-write to send metrics to a central location. + // + // By default, the sharding of targets is performed on: + // * The `__address__` target's metadata label for PodMonitor, + // ServiceMonitor and ScrapeConfig resources. + // * The `__param_target__` label for Probe resources. + // + // Users can define their own sharding implementation by setting the + // `__tmp_hash` label during the target discovery with relabeling + // configuration (either in the monitoring resources or via scrape class). + // + // You can also disable sharding on a specific target by setting the + // `__tmp_disable_sharding` label with relabeling configuration. When + // the label value isn't empty, all Prometheus shards will scrape the target. + // +optional + Shards *int32 `json:"shards,omitempty"` + + // replicaExternalLabelName defines the name of Prometheus external label used to denote the replica name. + // The external label will _not_ be added when the field is set to the + // empty string (`""`). + // + // Default: "prometheus_replica" + // +optional + ReplicaExternalLabelName *string `json:"replicaExternalLabelName,omitempty"` + // prometheusExternalLabelName defines the name of Prometheus external label used to denote the Prometheus instance + // name. 
The external label will _not_ be added when the field is set to + // the empty string (`""`). + // + // Default: "prometheus" + // +optional + PrometheusExternalLabelName *string `json:"prometheusExternalLabelName,omitempty"` + + // logLevel for Prometheus and the config-reloader sidecar. + // +kubebuilder:validation:Enum="";debug;info;warn;error + // +optional + LogLevel string `json:"logLevel,omitempty"` + // logFormat for Log level for Prometheus and the config-reloader sidecar. + // +kubebuilder:validation:Enum="";logfmt;json + // +optional + LogFormat string `json:"logFormat,omitempty"` + + // scrapeInterval defines interval between consecutive scrapes. + // + // Default: "30s" + // +kubebuilder:default:="30s" + // +optional + ScrapeInterval Duration `json:"scrapeInterval,omitempty"` + // scrapeTimeout defines the number of seconds to wait until a scrape request times out. + // The value cannot be greater than the scrape interval otherwise the operator will reject the resource. + // +optional + ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` + + // scrapeProtocols defines the protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // `PrometheusText1.0.0` requires Prometheus >= v3.0.0. + // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` + + // externalLabels defines the labels to add to any time series or alerts when communicating with + // external systems (federation, remote storage, Alertmanager). + // Labels defined by `spec.replicaExternalLabelName` and + // `spec.prometheusExternalLabelName` take precedence over this list. 
+ // +optional + ExternalLabels map[string]string `json:"externalLabels,omitempty"` + + // enableRemoteWriteReceiver defines the Prometheus to be used as a receiver for the Prometheus remote + // write protocol. + // + // WARNING: This is not considered an efficient way of ingesting samples. + // Use it with caution for specific low-volume use cases. + // It is not suitable for replacing the ingestion via scraping and turning + // Prometheus into a push-based metrics collection system. + // For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#remote-write-receiver + // + // It requires Prometheus >= v2.33.0. + // +optional + EnableRemoteWriteReceiver bool `json:"enableRemoteWriteReceiver,omitempty"` + + // enableOTLPReceiver defines the Prometheus to be used as a receiver for the OTLP Metrics protocol. + // + // Note that the OTLP receiver endpoint is automatically enabled if `.spec.otlpConfig` is defined. + // + // It requires Prometheus >= v2.47.0. + // +optional + EnableOTLPReceiver *bool `json:"enableOTLPReceiver,omitempty"` + + // remoteWriteReceiverMessageVersions list of the protobuf message versions to accept when receiving the + // remote writes. + // + // It requires Prometheus >= v2.54.0. + // + // +kubebuilder:validation:MinItems=1 + // +listType:=set + // +optional + RemoteWriteReceiverMessageVersions []RemoteWriteMessageVersion `json:"remoteWriteReceiverMessageVersions,omitempty"` + + // enableFeatures enables access to Prometheus feature flags. By default, no features are enabled. + // + // Enabling features which are disabled by default is entirely outside the + // scope of what the maintainers will support and by doing so, you accept + // that this behaviour may break at any time without notice. 
+ // + // For more information see https://prometheus.io/docs/prometheus/latest/feature_flags/ + // + // +listType:=set + // +optional + EnableFeatures []EnableFeature `json:"enableFeatures,omitempty"` + + // externalUrl defines the external URL under which the Prometheus service is externally + // available. This is necessary to generate correct URLs (for instance if + // Prometheus is accessible behind an Ingress resource). + // +optional + ExternalURL string `json:"externalUrl,omitempty"` + // routePrefix defines the route prefix Prometheus registers HTTP handlers for. + // + // This is useful when using `spec.externalURL`, and a proxy is rewriting + // HTTP routes of a request, and the actual ExternalURL is still true, but + // the server serves requests under a different route prefix. For example + // for use with `kubectl proxy`. + // +optional + RoutePrefix string `json:"routePrefix,omitempty"` + + // storage defines the storage used by Prometheus. + // +optional + Storage *StorageSpec `json:"storage,omitempty"` + + // volumes allows the configuration of additional volumes on the output + // StatefulSet definition. Volumes specified will be appended to other + // volumes that are generated as a result of StorageSpec objects. + // +optional + Volumes []v1.Volume `json:"volumes,omitempty"` + // volumeMounts allows the configuration of additional VolumeMounts. + // + // VolumeMounts will be appended to other VolumeMounts in the 'prometheus' + // container, that are generated as a result of StorageSpec objects. + // +optional + VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"` + + // persistentVolumeClaimRetentionPolicy defines the field controls if and how PVCs are deleted during the lifecycle of a StatefulSet. + // The default behavior is all PVCs are retained. + // This is an alpha field from kubernetes 1.23 until 1.26 and a beta field from 1.26. + // It requires enabling the StatefulSetAutoDeletePVC feature gate. 
+ // + // +optional + PersistentVolumeClaimRetentionPolicy *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty"` + + // web defines the configuration of the Prometheus web server. + // +optional + Web *PrometheusWebSpec `json:"web,omitempty"` + + // resources defines the resources requests and limits of the 'prometheus' container. + // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` + + // nodeSelector defines on which Nodes the Pods are scheduled. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // serviceAccountName is the name of the ServiceAccount to use to run the + // Prometheus Pods. + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // automountServiceAccountToken defines whether a service account token should be automatically mounted in the pod. + // If the field isn't set, the operator mounts the service account token by default. + // + // **Warning:** be aware that by default, Prometheus requires the service account token for Kubernetes service discovery. + // It is possible to use strategic merge patch to project the service account token into the 'prometheus' container. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` + + // secrets defines a list of Secrets in the same namespace as the Prometheus + // object, which shall be mounted into the Prometheus Pods. + // Each Secret is added to the StatefulSet definition as a volume named `secret-`. + // The Secrets are mounted into /etc/prometheus/secrets/ in the 'prometheus' container. + // +listType:=set + // +optional + Secrets []string `json:"secrets,omitempty"` + // configMaps defines a list of ConfigMaps in the same namespace as the Prometheus + // object, which shall be mounted into the Prometheus Pods. + // Each ConfigMap is added to the StatefulSet definition as a volume named `configmap-`. 
+ // The ConfigMaps are mounted into /etc/prometheus/configmaps/ in the 'prometheus' container. + // +optional + ConfigMaps []string `json:"configMaps,omitempty"` + + // affinity defines the Pods' affinity scheduling rules if specified. + // +optional + Affinity *v1.Affinity `json:"affinity,omitempty"` + // tolerations defines the Pods' tolerations if specified. + // +optional + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + + // topologySpreadConstraints defines the pod's topology spread constraints if specified. + // +optional + TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + + // remoteWrite defines the list of remote write configurations. + // +optional + RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"` + + // otlp defines the settings related to the OTLP receiver feature. + // It requires Prometheus >= v2.55.0. + // + // +optional + OTLP *OTLPConfig `json:"otlp,omitempty"` + + // securityContext holds pod-level security attributes and common container settings. + // This defaults to the default PodSecurityContext. + // +optional + SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` + + // dnsPolicy defines the DNS policy for the pods. + // + // +optional + DNSPolicy *DNSPolicy `json:"dnsPolicy,omitempty"` + // dnsConfig defines the DNS configuration for the pods. + // + // +optional + DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty"` + // listenLocal when true, the Prometheus server listens on the loopback address + // instead of the Pod IP's address. + // + // +optional + ListenLocal bool `json:"listenLocal,omitempty"` + + // enableServiceLinks defines whether information about services should be injected into pod's environment variables + // +optional + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"` + + // containers allows injecting additional containers or modifying operator + // generated containers. 
This can be used to allow adding an authentication + // proxy to the Pods or to change the behavior of an operator generated + // container. Containers described here modify an operator generated + // container if they share the same name and modifications are done via a + // strategic merge patch. + // + // The names of containers managed by the operator are: + // * `prometheus` + // * `config-reloader` + // * `thanos-sidecar` + // + // Overriding containers is entirely outside the scope of what the + // maintainers will support and by doing so, you accept that this behaviour + // may break at any time without notice. + // +optional + Containers []v1.Container `json:"containers,omitempty"` + // initContainers allows injecting initContainers to the Pod definition. Those + // can be used to e.g. fetch secrets for injection into the Prometheus + // configuration from external sources. Any errors during the execution of + // an initContainer will lead to a restart of the Pod. More info: + // https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // InitContainers described here modify an operator generated init + // containers if they share the same name and modifications are done via a + // strategic merge patch. + // + // The names of init container name managed by the operator are: + // * `init-config-reloader`. + // + // Overriding init containers is entirely outside the scope of what the + // maintainers will support and by doing so, you accept that this behaviour + // may break at any time without notice. + // +optional + InitContainers []v1.Container `json:"initContainers,omitempty"` + + // additionalScrapeConfigs allows specifying a key of a Secret containing + // additional Prometheus scrape configurations. Scrape configurations + // specified are appended to the configurations generated by the Prometheus + // Operator. 
Job configurations specified must have the form as specified + // in the official Prometheus documentation: + // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. + // As scrape configs are appended, the user is responsible to make sure it + // is valid. Note that using this feature may expose the possibility to + // break upgrades of Prometheus. It is advised to review Prometheus release + // notes to ensure that no incompatible scrape configs are going to break + // Prometheus after the upgrade. + // +optional + AdditionalScrapeConfigs *v1.SecretKeySelector `json:"additionalScrapeConfigs,omitempty"` + + // apiserverConfig allows specifying a host and auth methods to access the + // Kuberntees API server. + // If null, Prometheus is assumed to run inside of the cluster: it will + // discover the API servers automatically and use the Pod's CA certificate + // and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. + // +optional + APIServerConfig *APIServerConfig `json:"apiserverConfig,omitempty"` + + // priorityClassName assigned to the Pods. + // +optional + PriorityClassName string `json:"priorityClassName,omitempty"` + // portName used for the pods and governing service. + // Default: "web" + // +kubebuilder:default:="web" + // +optional + PortName string `json:"portName,omitempty"` + + // arbitraryFSAccessThroughSMs when true, ServiceMonitor, PodMonitor and Probe object are forbidden to + // reference arbitrary files on the file system of the 'prometheus' + // container. + // When a ServiceMonitor's endpoint specifies a `bearerTokenFile` value + // (e.g. '/var/run/secrets/kubernetes.io/serviceaccount/token'), a + // malicious target can get access to the Prometheus service account's + // token in the Prometheus' scrape request. Setting + // `spec.arbitraryFSAccessThroughSM` to 'true' would prevent the attack. + // Users should instead provide the credentials using the + // `spec.bearerTokenSecret` field. 
+ // +optional + ArbitraryFSAccessThroughSMs ArbitraryFSAccessThroughSMsConfig `json:"arbitraryFSAccessThroughSMs,omitempty"` + + // overrideHonorLabels when true, Prometheus resolves label conflicts by renaming the labels in the scraped data + // to “exported_” for all targets created from ServiceMonitor, PodMonitor and + // ScrapeConfig objects. Otherwise the HonorLabels field of the service or pod monitor applies. + // In practice,`OverrideHonorLabels:true` enforces `honorLabels:false` + // for all ServiceMonitor, PodMonitor and ScrapeConfig objects. + // +optional + OverrideHonorLabels bool `json:"overrideHonorLabels,omitempty"` + // overrideHonorTimestamps when true, Prometheus ignores the timestamps for all the targets created + // from service and pod monitors. + // Otherwise the HonorTimestamps field of the service or pod monitor applies. + // +optional + OverrideHonorTimestamps bool `json:"overrideHonorTimestamps,omitempty"` + + // ignoreNamespaceSelectors when true, `spec.namespaceSelector` from all PodMonitor, ServiceMonitor + // and Probe objects will be ignored. They will only discover targets + // within the namespace of the PodMonitor, ServiceMonitor and Probe + // object. + // +optional + IgnoreNamespaceSelectors bool `json:"ignoreNamespaceSelectors,omitempty"` + + // enforcedNamespaceLabel when not empty, a label will be added to: + // + // 1. All metrics scraped from `ServiceMonitor`, `PodMonitor`, `Probe` and `ScrapeConfig` objects. + // 2. All metrics generated from recording rules defined in `PrometheusRule` objects. + // 3. All alerts generated from alerting rules defined in `PrometheusRule` objects. + // 4. All vector selectors of PromQL expressions defined in `PrometheusRule` objects. + // + // The label will not added for objects referenced in `spec.excludedFromEnforcement`. + // + // The label's name is this field's value. 
+ // The label's value is the namespace of the `ServiceMonitor`, + // `PodMonitor`, `Probe`, `PrometheusRule` or `ScrapeConfig` object. + // +optional + EnforcedNamespaceLabel string `json:"enforcedNamespaceLabel,omitempty"` + + // enforcedSampleLimit when defined specifies a global limit on the number + // of scraped samples that will be accepted. This overrides any + // `spec.sampleLimit` set by ServiceMonitor, PodMonitor, Probe objects + // unless `spec.sampleLimit` is greater than zero and less than + // `spec.enforcedSampleLimit`. + // + // It is meant to be used by admins to keep the overall number of + // samples/series under a desired limit. + // + // When both `enforcedSampleLimit` and `sampleLimit` are defined and greater than zero, the following rules apply: + // * Scrape objects without a defined sampleLimit value will inherit the global sampleLimit value (Prometheus >= 2.45.0) or the enforcedSampleLimit value (Prometheus < v2.45.0). + // If Prometheus version is >= 2.45.0 and the `enforcedSampleLimit` is greater than the `sampleLimit`, the `sampleLimit` will be set to `enforcedSampleLimit`. + // * Scrape objects with a sampleLimit value less than or equal to enforcedSampleLimit keep their specific value. + // * Scrape objects with a sampleLimit value greater than enforcedSampleLimit are set to enforcedSampleLimit. + // + // + // +optional + EnforcedSampleLimit *uint64 `json:"enforcedSampleLimit,omitempty"` + // enforcedTargetLimit when defined specifies a global limit on the number + // of scraped targets. The value overrides any `spec.targetLimit` set by + // ServiceMonitor, PodMonitor, Probe objects unless `spec.targetLimit` is + // greater than zero and less than `spec.enforcedTargetLimit`. + // + // It is meant to be used by admins to to keep the overall number of + // targets under a desired limit. 
+ // + // When both `enforcedTargetLimit` and `targetLimit` are defined and greater than zero, the following rules apply: + // * Scrape objects without a defined targetLimit value will inherit the global targetLimit value (Prometheus >= 2.45.0) or the enforcedTargetLimit value (Prometheus < v2.45.0). + // If Prometheus version is >= 2.45.0 and the `enforcedTargetLimit` is greater than the `targetLimit`, the `targetLimit` will be set to `enforcedTargetLimit`. + // * Scrape objects with a targetLimit value less than or equal to enforcedTargetLimit keep their specific value. + // * Scrape objects with a targetLimit value greater than enforcedTargetLimit are set to enforcedTargetLimit. + // + // + // +optional + EnforcedTargetLimit *uint64 `json:"enforcedTargetLimit,omitempty"` + // enforcedLabelLimit when defined specifies a global limit on the number + // of labels per sample. The value overrides any `spec.labelLimit` set by + // ServiceMonitor, PodMonitor, Probe objects unless `spec.labelLimit` is + // greater than zero and less than `spec.enforcedLabelLimit`. + // + // It requires Prometheus >= v2.27.0. + // + // When both `enforcedLabelLimit` and `labelLimit` are defined and greater than zero, the following rules apply: + // * Scrape objects without a defined labelLimit value will inherit the global labelLimit value (Prometheus >= 2.45.0) or the enforcedLabelLimit value (Prometheus < v2.45.0). + // If Prometheus version is >= 2.45.0 and the `enforcedLabelLimit` is greater than the `labelLimit`, the `labelLimit` will be set to `enforcedLabelLimit`. + // * Scrape objects with a labelLimit value less than or equal to enforcedLabelLimit keep their specific value. + // * Scrape objects with a labelLimit value greater than enforcedLabelLimit are set to enforcedLabelLimit. 
+ // + // + // +optional + EnforcedLabelLimit *uint64 `json:"enforcedLabelLimit,omitempty"` + // enforcedLabelNameLengthLimit when defined specifies a global limit on the length + // of labels name per sample. The value overrides any `spec.labelNameLengthLimit` set by + // ServiceMonitor, PodMonitor, Probe objects unless `spec.labelNameLengthLimit` is + // greater than zero and less than `spec.enforcedLabelNameLengthLimit`. + // + // It requires Prometheus >= v2.27.0. + // + // When both `enforcedLabelNameLengthLimit` and `labelNameLengthLimit` are defined and greater than zero, the following rules apply: + // * Scrape objects without a defined labelNameLengthLimit value will inherit the global labelNameLengthLimit value (Prometheus >= 2.45.0) or the enforcedLabelNameLengthLimit value (Prometheus < v2.45.0). + // If Prometheus version is >= 2.45.0 and the `enforcedLabelNameLengthLimit` is greater than the `labelNameLengthLimit`, the `labelNameLengthLimit` will be set to `enforcedLabelNameLengthLimit`. + // * Scrape objects with a labelNameLengthLimit value less than or equal to enforcedLabelNameLengthLimit keep their specific value. + // * Scrape objects with a labelNameLengthLimit value greater than enforcedLabelNameLengthLimit are set to enforcedLabelNameLengthLimit. + // + // + // +optional + EnforcedLabelNameLengthLimit *uint64 `json:"enforcedLabelNameLengthLimit,omitempty"` + // enforcedLabelValueLengthLimit when not null defines a global limit on the length + // of labels value per sample. The value overrides any `spec.labelValueLengthLimit` set by + // ServiceMonitor, PodMonitor, Probe objects unless `spec.labelValueLengthLimit` is + // greater than zero and less than `spec.enforcedLabelValueLengthLimit`. + // + // It requires Prometheus >= v2.27.0. 
+ // + // When both `enforcedLabelValueLengthLimit` and `labelValueLengthLimit` are defined and greater than zero, the following rules apply: + // * Scrape objects without a defined labelValueLengthLimit value will inherit the global labelValueLengthLimit value (Prometheus >= 2.45.0) or the enforcedLabelValueLengthLimit value (Prometheus < v2.45.0). + // If Prometheus version is >= 2.45.0 and the `enforcedLabelValueLengthLimit` is greater than the `labelValueLengthLimit`, the `labelValueLengthLimit` will be set to `enforcedLabelValueLengthLimit`. + // * Scrape objects with a labelValueLengthLimit value less than or equal to enforcedLabelValueLengthLimit keep their specific value. + // * Scrape objects with a labelValueLengthLimit value greater than enforcedLabelValueLengthLimit are set to enforcedLabelValueLengthLimit. + // + // + // +optional + EnforcedLabelValueLengthLimit *uint64 `json:"enforcedLabelValueLengthLimit,omitempty"` + // enforcedKeepDroppedTargets when defined specifies a global limit on the number of targets + // dropped by relabeling that will be kept in memory. The value overrides + // any `spec.keepDroppedTargets` set by + // ServiceMonitor, PodMonitor, Probe objects unless `spec.keepDroppedTargets` is + // greater than zero and less than `spec.enforcedKeepDroppedTargets`. + // + // It requires Prometheus >= v2.47.0. + // + // When both `enforcedKeepDroppedTargets` and `keepDroppedTargets` are defined and greater than zero, the following rules apply: + // * Scrape objects without a defined keepDroppedTargets value will inherit the global keepDroppedTargets value (Prometheus >= 2.45.0) or the enforcedKeepDroppedTargets value (Prometheus < v2.45.0). + // If Prometheus version is >= 2.45.0 and the `enforcedKeepDroppedTargets` is greater than the `keepDroppedTargets`, the `keepDroppedTargets` will be set to `enforcedKeepDroppedTargets`. 
+ // * Scrape objects with a keepDroppedTargets value less than or equal to enforcedKeepDroppedTargets keep their specific value. + // * Scrape objects with a keepDroppedTargets value greater than enforcedKeepDroppedTargets are set to enforcedKeepDroppedTargets. + // + // + // +optional + EnforcedKeepDroppedTargets *uint64 `json:"enforcedKeepDroppedTargets,omitempty"` + // enforcedBodySizeLimit when defined specifies a global limit on the size + // of uncompressed response body that will be accepted by Prometheus. + // Targets responding with a body larger than this many bytes will cause + // the scrape to fail. + // + // It requires Prometheus >= v2.28.0. + // + // When both `enforcedBodySizeLimit` and `bodySizeLimit` are defined and greater than zero, the following rules apply: + // * Scrape objects without a defined bodySizeLimit value will inherit the global bodySizeLimit value (Prometheus >= 2.45.0) or the enforcedBodySizeLimit value (Prometheus < v2.45.0). + // If Prometheus version is >= 2.45.0 and the `enforcedBodySizeLimit` is greater than the `bodySizeLimit`, the `bodySizeLimit` will be set to `enforcedBodySizeLimit`. + // * Scrape objects with a bodySizeLimit value less than or equal to enforcedBodySizeLimit keep their specific value. + // * Scrape objects with a bodySizeLimit value greater than enforcedBodySizeLimit are set to enforcedBodySizeLimit. + // + // +optional + EnforcedBodySizeLimit ByteSize `json:"enforcedBodySizeLimit,omitempty"` + + // nameValidationScheme defines the validation scheme for metric and label names. + // + // It requires Prometheus >= v2.55.0. + // + // +optional + NameValidationScheme *NameValidationSchemeOptions `json:"nameValidationScheme,omitempty"` + + // nameEscapingScheme defines the character escaping scheme that will be requested when scraping + // for metric and label names that do not conform to the legacy Prometheus + // character set. + // + // It requires Prometheus >= v3.4.0. 
+ // + // +optional + NameEscapingScheme *NameEscapingSchemeOptions `json:"nameEscapingScheme,omitempty"` + + // convertClassicHistogramsToNHCB defines whether to convert all scraped classic histograms into a native + // histogram with custom buckets. + // + // It requires Prometheus >= v3.4.0. + // + // +optional + ConvertClassicHistogramsToNHCB *bool `json:"convertClassicHistogramsToNHCB,omitempty"` + + // scrapeClassicHistograms defines whether to scrape a classic histogram that is also exposed as a native histogram. + // + // Notice: `scrapeClassicHistograms` corresponds to the `always_scrape_classic_histograms` field in the Prometheus configuration. + // + // It requires Prometheus >= v3.5.0. + // + // +optional + ScrapeClassicHistograms *bool `json:"scrapeClassicHistograms,omitempty"` + + // minReadySeconds defines the minimum number of seconds for which a newly created Pod should be ready + // without any of its container crashing for it to be considered available. + // + // If unset, pods will be considered available as soon as they are ready. + // + // +kubebuilder:validation:Minimum:=0 + // +optional + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + + // hostAliases defines the optional list of hosts and IPs that will be injected into the Pod's + // hosts file if specified. + // + // +listType=map + // +listMapKey=ip + // +optional + HostAliases []HostAlias `json:"hostAliases,omitempty"` + + // additionalArgs allows setting additional arguments for the 'prometheus' container. + // + // It is intended for e.g. activating hidden flags which are not supported by + // the dedicated configuration options yet. The arguments are passed as-is to the + // Prometheus container which may cause issues if they are invalid or not supported + // by the given Prometheus version. + // + // In case of an argument conflict (e.g. 
an argument which is already set by the + // operator itself) or when providing an invalid argument, the reconciliation will + // fail and an error will be logged. + // + // +optional + AdditionalArgs []Argument `json:"additionalArgs,omitempty"` + + // walCompression defines the compression of the write-ahead log (WAL) using Snappy. + // + // WAL compression is enabled by default for Prometheus >= 2.20.0 + // + // Requires Prometheus v2.11.0 and above. + // + // +optional + WALCompression *bool `json:"walCompression,omitempty"` + + // excludedFromEnforcement defines the list of references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects + // to be excluded from enforcing a namespace label of origin. + // + // It is only applicable if `spec.enforcedNamespaceLabel` set to true. + // + // +optional + ExcludedFromEnforcement []ObjectReference `json:"excludedFromEnforcement,omitempty"` + + // hostNetwork defines the host's network namespace if true. + // + // Make sure to understand the security implications if you want to enable + // it (https://kubernetes.io/docs/concepts/configuration/overview/ ). + // + // When hostNetwork is enabled, this will set the DNS policy to + // `ClusterFirstWithHostNet` automatically (unless `.spec.DNSPolicy` is set + // to a different value). + // + // +optional + HostNetwork bool `json:"hostNetwork,omitempty"` + + // podTargetLabels are appended to the `spec.podTargetLabels` field of all + // PodMonitor and ServiceMonitor objects. + // + // +optional + PodTargetLabels []string `json:"podTargetLabels,omitempty"` + + // tracingConfig defines tracing in Prometheus. + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + // +optional + TracingConfig *PrometheusTracingConfig `json:"tracingConfig,omitempty"` + // bodySizeLimit defines per-scrape on response body size. + // Only valid in Prometheus versions 2.45.0 and newer. 
+ // + // Note that the global limit only applies to scrape objects that don't specify an explicit limit value. + // If you want to enforce a maximum limit for all scrape objects, refer to enforcedBodySizeLimit. + // + // +optional + BodySizeLimit *ByteSize `json:"bodySizeLimit,omitempty"` + // sampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + // Only valid in Prometheus versions 2.45.0 and newer. + // + // Note that the global limit only applies to scrape objects that don't specify an explicit limit value. + // If you want to enforce a maximum limit for all scrape objects, refer to enforcedSampleLimit. + // + // +optional + SampleLimit *uint64 `json:"sampleLimit,omitempty"` + // targetLimit defines a limit on the number of scraped targets that will be accepted. + // Only valid in Prometheus versions 2.45.0 and newer. + // + // Note that the global limit only applies to scrape objects that don't specify an explicit limit value. + // If you want to enforce a maximum limit for all scrape objects, refer to enforcedTargetLimit. + // + // +optional + TargetLimit *uint64 `json:"targetLimit,omitempty"` + // labelLimit defines per-scrape limit on number of labels that will be accepted for a sample. + // Only valid in Prometheus versions 2.45.0 and newer. + // + // Note that the global limit only applies to scrape objects that don't specify an explicit limit value. + // If you want to enforce a maximum limit for all scrape objects, refer to enforcedLabelLimit. + // + // +optional + LabelLimit *uint64 `json:"labelLimit,omitempty"` + // labelNameLengthLimit defines the per-scrape limit on length of labels name that will be accepted for a sample. + // Only valid in Prometheus versions 2.45.0 and newer. + // + // Note that the global limit only applies to scrape objects that don't specify an explicit limit value. + // If you want to enforce a maximum limit for all scrape objects, refer to enforcedLabelNameLengthLimit. 
+ // + // +optional + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` + // labelValueLengthLimit defines the per-scrape limit on length of labels value that will be accepted for a sample. + // Only valid in Prometheus versions 2.45.0 and newer. + // + // Note that the global limit only applies to scrape objects that don't specify an explicit limit value. + // If you want to enforce a maximum limit for all scrape objects, refer to enforcedLabelValueLengthLimit. + // + // +optional + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + // keepDroppedTargets defines the per-scrape limit on the number of targets dropped by relabeling + // that will be kept in memory. 0 means no limit. + // + // It requires Prometheus >= v2.47.0. + // + // Note that the global limit only applies to scrape objects that don't specify an explicit limit value. + // If you want to enforce a maximum limit for all scrape objects, refer to enforcedKeepDroppedTargets. + // + // +optional + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + + // reloadStrategy defines the strategy used to reload the Prometheus configuration. + // If not specified, the configuration is reloaded using the /-/reload HTTP endpoint. + // +optional + ReloadStrategy *ReloadStrategyType `json:"reloadStrategy,omitempty"` + + // maximumStartupDurationSeconds defines the maximum time that the `prometheus` container's startup probe will wait before being considered failed. The startup probe will return success after the WAL replay is complete. + // If set, the value should be greater than 60 (seconds). Otherwise it will be equal to 900 seconds (15 minutes). + // +optional + // +kubebuilder:validation:Minimum=60 + MaximumStartupDurationSeconds *int32 `json:"maximumStartupDurationSeconds,omitempty"` + + // scrapeClasses defines the list of scrape classes to expose to scraping objects such as + // PodMonitors, ServiceMonitors, Probes and ScrapeConfigs. 
+ // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + // +listType=map + // +listMapKey=name + // +optional + ScrapeClasses []ScrapeClass `json:"scrapeClasses,omitempty"` + + // serviceDiscoveryRole defines the service discovery role used to discover targets from + // `ServiceMonitor` objects and Alertmanager endpoints. + // + // If set, the value should be either "Endpoints" or "EndpointSlice". + // If unset, the operator assumes the "Endpoints" role. + // + // +optional + ServiceDiscoveryRole *ServiceDiscoveryRole `json:"serviceDiscoveryRole,omitempty"` + + // tsdb defines the runtime reloadable configuration of the timeseries database(TSDB). + // It requires Prometheus >= v2.39.0 or PrometheusAgent >= v2.54.0. + // + // +optional + TSDB *TSDBSpec `json:"tsdb,omitempty"` + + // scrapeFailureLogFile defines the file to which scrape failures are logged. + // Reloading the configuration will reopen the file. + // + // If the filename has an empty path, e.g. 'file.log', The Prometheus Pods + // will mount the file into an emptyDir volume at `/var/log/prometheus`. + // If a full path is provided, e.g. '/var/log/prometheus/file.log', you + // must mount a volume in the specified directory and it must be writable. + // It requires Prometheus >= v2.55.0. + // + // +kubebuilder:validation:MinLength=1 + // +optional + ScrapeFailureLogFile *string `json:"scrapeFailureLogFile,omitempty"` + + // serviceName defines the name of the service name used by the underlying StatefulSet(s) as the governing service. + // If defined, the Service must be created before the Prometheus/PrometheusAgent resource in the same namespace and it must define a selector that matches the pod labels. + // If empty, the operator will create and manage a headless service named `prometheus-operated` for Prometheus resources, + // or `prometheus-agent-operated` for PrometheusAgent resources. 
+ // When deploying multiple Prometheus/PrometheusAgent resources in the same namespace, it is recommended to specify a different value for each. + // See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-network-id for more details. + // +optional + // +kubebuilder:validation:MinLength=1 + ServiceName *string `json:"serviceName,omitempty"` + + // runtime defines the values for the Prometheus process behavior + // +optional + Runtime *RuntimeConfig `json:"runtime,omitempty"` + + // terminationGracePeriodSeconds defines the optional duration in seconds the pod needs to terminate gracefully. + // Value must be non-negative integer. The value zero indicates stop immediately via + // the kill signal (no opportunity to shut down) which may lead to data corruption. + // + // Defaults to 600 seconds. + // + // +kubebuilder:validation:Minimum:=0 + // +optional + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` + + // hostUsers supports the user space in Kubernetes. + // + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/user-namespaces/ + // + // + // The feature requires at least Kubernetes 1.28 with the `UserNamespacesSupport` feature gate enabled. + // Starting Kubernetes 1.33, the feature is enabled by default. + // + // +optional + HostUsers *bool `json:"hostUsers,omitempty"` +} + +// Specifies the validation scheme for metric and label names. +// +// Supported values are: +// - `UTF8NameValidationScheme` for UTF-8 support. +// - `LegacyNameValidationScheme` for letters, numbers, colons, and underscores. +// +// Note that `LegacyNameValidationScheme` cannot be used along with the +// OpenTelemetry `NoUTF8EscapingWithSuffixes` translation strategy (if +// enabled). 
+// +// +kubebuilder:validation:Enum=UTF8;Legacy +type NameValidationSchemeOptions string + +const ( + UTF8NameValidationScheme NameValidationSchemeOptions = "UTF8" + LegacyNameValidationScheme NameValidationSchemeOptions = "Legacy" +) + +// Specifies the character escaping scheme that will be applied when scraping +// for metric and label names that do not conform to the legacy Prometheus +// character set. +// +// Supported values are: +// +// - `AllowUTF8`, full UTF-8 support, no escaping needed. +// - `Underscores`, legacy-invalid characters are escaped to underscores. +// - `Dots`, dot characters are escaped to `_dot_`, underscores to `__`, and +// all other legacy-invalid characters to underscores. +// - `Values`, the string is prefixed by `U__` and all invalid characters are +// escaped to their unicode value, surrounded by underscores. +// +// +kubebuilder:validation:Enum=AllowUTF8;Underscores;Dots;Values +type NameEscapingSchemeOptions string + +const ( + AllowUTF8NameEscapingScheme NameEscapingSchemeOptions = "AllowUTF8" + UnderscoresNameEscapingScheme NameEscapingSchemeOptions = "Underscores" + DotsNameEscapingScheme NameEscapingSchemeOptions = "Dots" + ValuesNameEscapingScheme NameEscapingSchemeOptions = "Values" +) + +// +kubebuilder:validation:Enum=HTTP;ProcessSignal +type ReloadStrategyType string + +const ( + // HTTPReloadStrategyType reloads the configuration using the /-/reload HTTP endpoint. + HTTPReloadStrategyType ReloadStrategyType = "HTTP" + + // ProcessSignalReloadStrategyType reloads the configuration by sending a SIGHUP signal to the process. 
+ ProcessSignalReloadStrategyType ReloadStrategyType = "ProcessSignal" +) + +// +kubebuilder:validation:Enum=Endpoints;EndpointSlice +type ServiceDiscoveryRole string + +const ( + EndpointsRole ServiceDiscoveryRole = "Endpoints" + EndpointSliceRole ServiceDiscoveryRole = "EndpointSlice" +) + +func (cpf *CommonPrometheusFields) PrometheusURIScheme() string { + if cpf.Web != nil && cpf.Web.TLSConfig != nil { + return "https" + } + + return "http" +} + +func (cpf *CommonPrometheusFields) WebRoutePrefix() string { + if cpf.RoutePrefix != "" { + return cpf.RoutePrefix + } + + return "/" +} + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:resource:categories="prometheus-operator",shortName="prom" +// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version",description="The version of Prometheus" +// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".spec.replicas",description="The number of desired replicas" +// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.availableReplicas",description="The number of ready replicas" +// +kubebuilder:printcolumn:name="Reconciled",type="string",JSONPath=".status.conditions[?(@.type == 'Reconciled')].status" +// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.conditions[?(@.type == 'Available')].status" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Paused",type="boolean",JSONPath=".status.paused",description="Whether the resource reconciliation is paused or not",priority=1 +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.shards,statuspath=.status.shards,selectorpath=.status.selector +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale + +// The 
`Prometheus` custom resource definition (CRD) defines a desired [Prometheus](https://prometheus.io/docs/prometheus) setup to run in a Kubernetes cluster. It allows to specify many options such as the number of replicas, persistent storage, and Alertmanagers where firing alerts should be sent and many more. +// +// For each `Prometheus` resource, the Operator deploys one or several `StatefulSet` objects in the same namespace. The number of StatefulSets is equal to the number of shards which is 1 by default. +// +// The resource defines via label and namespace selectors which `ServiceMonitor`, `PodMonitor`, `Probe` and `PrometheusRule` objects should be associated to the deployed Prometheus instances. +// +// The Operator continuously reconciles the scrape and rules configuration and a sidecar container running in the Prometheus pods triggers a reload of the configuration when needed. +type Prometheus struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ObjectMeta as the metadata that all persisted resources. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec defines the specification of the desired behavior of the Prometheus cluster. More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +required + Spec PrometheusSpec `json:"spec"` + // status defines the most recent observed status of the Prometheus cluster. Read-only. + // More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status PrometheusStatus `json:"status,omitempty"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *Prometheus) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// PrometheusList is a list of Prometheuses. 
+// +k8s:openapi-gen=true +type PrometheusList struct { + // TypeMeta defines the versioned schema of this representation of an object. + // +optional + metav1.TypeMeta `json:",inline"` + // metadata defines ListMeta as metadata for collection responses. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // List of Prometheuses + Items []Prometheus `json:"items"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *PrometheusList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// PrometheusSpec is a specification of the desired behavior of the Prometheus cluster. More info: +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +// +k8s:openapi-gen=true +type PrometheusSpec struct { + // +optional + CommonPrometheusFields `json:",inline"` + + // baseImage is deprecated: use 'spec.image' instead. + // +optional + BaseImage string `json:"baseImage,omitempty"` + // tag is deprecated: use 'spec.image' instead. The image's tag can be specified as part of the image name. + // +optional + Tag string `json:"tag,omitempty"` + // sha is deprecated: use 'spec.image' instead. The image's digest can be specified as part of the image name. + // +optional + SHA string `json:"sha,omitempty"` + + // retention defines how long to retain the Prometheus data. + // + // Default: "24h" if `spec.retention` and `spec.retentionSize` are empty. + // +optional + Retention Duration `json:"retention,omitempty"` + // retentionSize defines the maximum number of bytes used by the Prometheus data. + // +optional + RetentionSize ByteSize `json:"retentionSize,omitempty"` + + // shardRetentionPolicy defines the retention policy for the Prometheus shards. + // (Alpha) Using this field requires the 'PrometheusShardRetentionPolicy' feature gate to be enabled. 
+ // + // The final goals for this feature can be seen at https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/proposals/202310-shard-autoscaling.md#graceful-scale-down-of-prometheus-servers, + // however, the feature is not yet fully implemented in this PR. The limitation being: + // * Retention duration is not settable, for now, shards are retained forever. + // + // +optional + ShardRetentionPolicy *ShardRetentionPolicy `json:"shardRetentionPolicy,omitempty"` + + // disableCompaction when true, the Prometheus compaction is disabled. + // When `spec.thanos.objectStorageConfig` or `spec.objectStorageConfigFile` are defined, the operator automatically + // disables block compaction to avoid race conditions during block uploads (as the Thanos documentation recommends). + // +optional + DisableCompaction bool `json:"disableCompaction,omitempty"` + + // rules defines the configuration of the Prometheus rules' engine. + // +optional + Rules Rules `json:"rules,omitempty"` + // prometheusRulesExcludedFromEnforce defines the list of PrometheusRule objects to which the namespace label + // enforcement doesn't apply. + // This is only relevant when `spec.enforcedNamespaceLabel` is set to true. + // +optional + // Deprecated: use `spec.excludedFromEnforcement` instead. + PrometheusRulesExcludedFromEnforce []PrometheusRuleExcludeConfig `json:"prometheusRulesExcludedFromEnforce,omitempty"` + // ruleSelector defines the prometheusRule objects to be selected for rule evaluation. An empty + // label selector matches all objects. A null label selector matches no + // objects. + // +optional + RuleSelector *metav1.LabelSelector `json:"ruleSelector,omitempty"` + // ruleNamespaceSelector defines the namespaces to match for PrometheusRule discovery. An empty label selector + // matches all namespaces. A null label selector matches the current + // namespace only. 
+ // +optional + RuleNamespaceSelector *metav1.LabelSelector `json:"ruleNamespaceSelector,omitempty"` + + // query defines the configuration of the Prometheus query service. + // +optional + Query *QuerySpec `json:"query,omitempty"` + + // alerting defines the settings related to Alertmanager. + // +optional + Alerting *AlertingSpec `json:"alerting,omitempty"` + // additionalAlertRelabelConfigs defines a key of a Secret containing + // additional Prometheus alert relabel configurations. The alert relabel + // configurations are appended to the configuration generated by the + // Prometheus Operator. They must be formatted according to the official + // Prometheus documentation: + // + // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs + // + // The user is responsible for making sure that the configurations are valid + // + // Note that using this feature may expose the possibility to break + // upgrades of Prometheus. It is advised to review Prometheus release notes + // to ensure that no incompatible alert relabel configs are going to break + // Prometheus after the upgrade. + // +optional + AdditionalAlertRelabelConfigs *v1.SecretKeySelector `json:"additionalAlertRelabelConfigs,omitempty"` + // additionalAlertManagerConfigs defines a key of a Secret containing + // additional Prometheus Alertmanager configurations. The Alertmanager + // configurations are appended to the configuration generated by the + // Prometheus Operator. They must be formatted according to the official + // Prometheus documentation: + // + // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config + // + // The user is responsible for making sure that the configurations are valid + // + // Note that using this feature may expose the possibility to break + // upgrades of Prometheus. 
It is advised to review Prometheus release notes + // to ensure that no incompatible AlertManager configs are going to break + // Prometheus after the upgrade. + // +optional + AdditionalAlertManagerConfigs *v1.SecretKeySelector `json:"additionalAlertManagerConfigs,omitempty"` + + // remoteRead defines the list of remote read configurations. + // +optional + RemoteRead []RemoteReadSpec `json:"remoteRead,omitempty"` + + // thanos defines the configuration of the optional Thanos sidecar. + // + // +optional + Thanos *ThanosSpec `json:"thanos,omitempty"` + + // queryLogFile specifies where the file to which PromQL queries are logged. + // + // If the filename has an empty path, e.g. 'query.log', The Prometheus Pods + // will mount the file into an emptyDir volume at `/var/log/prometheus`. + // If a full path is provided, e.g. '/var/log/prometheus/query.log', you + // must mount a volume in the specified directory and it must be writable. + // This is because the prometheus container runs with a read-only root + // filesystem for security reasons. + // Alternatively, the location can be set to a standard I/O stream, e.g. + // `/dev/stdout`, to log query information to the default Prometheus log + // stream. + // +optional + QueryLogFile string `json:"queryLogFile,omitempty"` + + // allowOverlappingBlocks enables vertical compaction and vertical query + // merge in Prometheus. + // + // Deprecated: this flag has no effect for Prometheus >= 2.39.0 where overlapping blocks are enabled by default. + // +optional + AllowOverlappingBlocks bool `json:"allowOverlappingBlocks,omitempty"` + + // exemplars related settings that are runtime reloadable. + // It requires to enable the `exemplar-storage` feature flag to be effective. + // +optional + Exemplars *Exemplars `json:"exemplars,omitempty"` + + // evaluationInterval defines the interval between rule evaluations. 
+ // Default: "30s" + // +kubebuilder:default:="30s" + // +optional + EvaluationInterval Duration `json:"evaluationInterval,omitempty"` + + // ruleQueryOffset defines the offset the rule evaluation timestamp of this particular group by the specified duration into the past. + // It requires Prometheus >= v2.53.0. + // +optional + RuleQueryOffset *Duration `json:"ruleQueryOffset,omitempty"` + + // enableAdminAPI defines access to the Prometheus web admin API. + // + // WARNING: Enabling the admin APIs enables mutating endpoints, to delete data, + // shutdown Prometheus, and more. Enabling this should be done with care and the + // user is advised to add additional authentication authorization via a proxy to + // ensure only clients authorized to perform these actions can do so. + // + // For more information: + // https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis + // +optional + EnableAdminAPI bool `json:"enableAdminAPI,omitempty"` +} + +type WhenScaledRetentionType string + +var ( + RetainWhenScaledRetentionType WhenScaledRetentionType = "Retain" + DeleteWhenScaledRetentionType WhenScaledRetentionType = "Delete" +) + +type RetainConfig struct { + // retentionPeriod defines the retentionPeriod for shard retention policy. + // +required + RetentionPeriod Duration `json:"retentionPeriod"` +} + +type ShardRetentionPolicy struct { + // whenScaled defines the retention policy when the Prometheus shards are scaled down. + // * `Delete`, the operator will delete the pods from the scaled-down shard(s). + // * `Retain`, the operator will keep the pods from the scaled-down shard(s), so the data can still be queried. + // + // If not defined, the operator assumes the `Delete` value. + // +kubebuilder:validation:Enum=Retain;Delete + // +optional + WhenScaled *WhenScaledRetentionType `json:"whenScaled,omitempty"` + // retain defines the config for retention when the retention policy is set to `Retain`. + // This field is ineffective as of now. 
+ // +optional + Retain *RetainConfig `json:"retain,omitempty"` +} + +type PrometheusTracingConfig struct { + // clientType defines the client used to export the traces. Supported values are `http` or `grpc`. + // +kubebuilder:validation:Enum=http;grpc + // +optional + ClientType *string `json:"clientType"` + + // endpoint to send the traces to. Should be provided in format :. + // +kubebuilder:validation:MinLength:=1 + // +required + Endpoint string `json:"endpoint"` + + // samplingFraction defines the probability a given trace will be sampled. Must be a float from 0 through 1. + // +optional + SamplingFraction *resource.Quantity `json:"samplingFraction"` + + // insecure if disabled, the client will use a secure connection. + // +optional + Insecure *bool `json:"insecure"` + + // headers defines the key-value pairs to be used as headers associated with gRPC or HTTP requests. + // +optional + Headers map[string]string `json:"headers"` + + // compression key for supported compression types. The only supported value is `gzip`. + // +kubebuilder:validation:Enum=gzip + // +optional + Compression *string `json:"compression"` + + // timeout defines the maximum time the exporter will wait for each batch export. + // +optional + Timeout *Duration `json:"timeout"` + + // tlsConfig to use when sending traces. + // +optional + TLSConfig *TLSConfig `json:"tlsConfig"` +} + +// PrometheusStatus is the most recent observed status of the Prometheus cluster. +// More info: +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +// +k8s:openapi-gen=true +type PrometheusStatus struct { + // paused defines whether any actions on the underlying managed objects are + // being performed. Only delete actions will be performed. + // +optional + Paused bool `json:"paused"` + // replicas defines the total number of non-terminated pods targeted by this Prometheus deployment + // (their labels match the selector). 
+ // +optional + Replicas int32 `json:"replicas"` + // updatedReplicas defines the total number of non-terminated pods targeted by this Prometheus deployment + // that have the desired version spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas"` + // availableReplicas defines the total number of available pods (ready for at least minReadySeconds) + // targeted by this Prometheus deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas"` + // unavailableReplicas defines the total number of unavailable pods targeted by this Prometheus deployment. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas"` + // conditions defines the current state of the Prometheus deployment. + // +listType=map + // +listMapKey=type + // +optional + Conditions []Condition `json:"conditions,omitempty"` + // shardStatuses defines the list has one entry per shard. Each entry provides a summary of the shard status. + // +listType=map + // +listMapKey=shardID + // +optional + ShardStatuses []ShardStatus `json:"shardStatuses,omitempty"` + // shards defines the most recently observed number of shards. + // +optional + Shards int32 `json:"shards,omitempty"` + // selector used to match the pods targeted by this Prometheus resource. + // +optional + Selector string `json:"selector,omitempty"` +} + +// AlertingSpec defines parameters for alerting configuration of Prometheus servers. +// +k8s:openapi-gen=true +type AlertingSpec struct { + // alertmanagers endpoints where Prometheus should send alerts to. + // +required + Alertmanagers []AlertmanagerEndpoints `json:"alertmanagers"` +} + +// StorageSpec defines the configured storage for a group Prometheus servers. +// If no storage option is specified, then by default an [EmptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) will be used. +// +// If multiple storage options are specified, priority will be given as follows: +// 1. emptyDir +// 2. ephemeral +// 3. 
volumeClaimTemplate +// +// +k8s:openapi-gen=true +type StorageSpec struct { + // disableMountSubPath deprecated: subPath usage will be removed in a future release. + // +optional + DisableMountSubPath bool `json:"disableMountSubPath,omitempty"` + // emptyDir to be used by the StatefulSet. + // If specified, it takes precedence over `ephemeral` and `volumeClaimTemplate`. + // More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir + // +optional + EmptyDir *v1.EmptyDirVolumeSource `json:"emptyDir,omitempty"` + // ephemeral to be used by the StatefulSet. + // This is a beta field in k8s 1.21 and GA in 1.15. + // For lower versions, starting with k8s 1.19, it requires enabling the GenericEphemeralVolume feature gate. + // More info: https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes + // +optional + Ephemeral *v1.EphemeralVolumeSource `json:"ephemeral,omitempty"` + // volumeClaimTemplate defines the PVC spec to be used by the Prometheus StatefulSets. + // The easiest way to use a volume that cannot be automatically provisioned + // is to use a label selector alongside manually created PersistentVolumes. + // +optional + VolumeClaimTemplate EmbeddedPersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"` +} + +// QuerySpec defines the query command line flags when starting Prometheus. +// +k8s:openapi-gen=true +type QuerySpec struct { + // lookbackDelta defines the delta difference allowed for retrieving metrics during expression evaluations. + // +optional + LookbackDelta *string `json:"lookbackDelta,omitempty"` + // maxConcurrency defines the number of concurrent queries that can be run at once. + // +kubebuilder:validation:Minimum:=1 + // +optional + MaxConcurrency *int32 `json:"maxConcurrency,omitempty"` + // maxSamples defines the maximum number of samples a single query can load into memory. 
Note that + // queries will fail if they would load more samples than this into memory, + // so this also limits the number of samples a query can return. + // +optional + MaxSamples *int32 `json:"maxSamples,omitempty"` + // timeout defines the maximum time a query may take before being aborted. + // +optional + Timeout *Duration `json:"timeout,omitempty"` +} + +// PrometheusWebSpec defines the configuration of the Prometheus web server. +// +k8s:openapi-gen=true +type PrometheusWebSpec struct { + // +optional + WebConfigFileFields `json:",inline"` + // pageTitle defines the prometheus web page title. + // +optional + PageTitle *string `json:"pageTitle,omitempty"` + + // maxConnections defines the maximum number of simultaneous connections + // A zero value means that Prometheus doesn't accept any incoming connection. + // +kubebuilder:validation:Minimum:=0 + // +optional + MaxConnections *int32 `json:"maxConnections,omitempty"` +} + +// ThanosSpec defines the configuration of the Thanos sidecar. +// +k8s:openapi-gen=true +type ThanosSpec struct { + // image defines the container image name for Thanos. If specified, it takes precedence over + // the `spec.thanos.baseImage`, `spec.thanos.tag` and `spec.thanos.sha` + // fields. + // + // Specifying `spec.thanos.version` is still necessary to ensure the + // Prometheus Operator knows which version of Thanos is being configured. + // + // If neither `spec.thanos.image` nor `spec.thanos.baseImage` are defined, + // the operator will use the latest upstream version of Thanos available at + // the time when the operator was released. + // + // +optional + Image *string `json:"image,omitempty"` + + // version of Thanos being deployed. The operator uses this information + // to generate the Prometheus StatefulSet + configuration files. + // + // If not specified, the operator assumes the latest upstream release of + // Thanos available at the time when the version of the operator was + // released. 
+ // + // +optional + Version *string `json:"version,omitempty"` + // tag is deprecated: use 'image' instead. The image's tag can be specified as as part of the image name. + // +optional + Tag *string `json:"tag,omitempty"` + // sha is deprecated: use 'image' instead. The image digest can be specified as part of the image name. + // +optional + SHA *string `json:"sha,omitempty"` + // baseImage is deprecated: use 'image' instead. + // +optional + BaseImage *string `json:"baseImage,omitempty"` + + // resources defines the resources requests and limits of the Thanos sidecar. + // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` + + // objectStorageConfig defines the Thanos sidecar's configuration to upload TSDB blocks to object storage. + // + // More info: https://thanos.io/tip/thanos/storage.md/ + // + // objectStorageConfigFile takes precedence over this field. + // +optional + ObjectStorageConfig *v1.SecretKeySelector `json:"objectStorageConfig,omitempty"` + // objectStorageConfigFile defines the Thanos sidecar's configuration file to upload TSDB blocks to object storage. + // + // More info: https://thanos.io/tip/thanos/storage.md/ + // + // This field takes precedence over objectStorageConfig. + // +optional + ObjectStorageConfigFile *string `json:"objectStorageConfigFile,omitempty"` + + // listenLocal is deprecated: use `grpcListenLocal` and `httpListenLocal` instead. + // +optional + ListenLocal bool `json:"listenLocal,omitempty"` + + // grpcListenLocal defines when true, the Thanos sidecar listens on the loopback interface instead + // of the Pod IP's address for the gRPC endpoints. + // + // It has no effect if `listenLocal` is true. + // +optional + GRPCListenLocal bool `json:"grpcListenLocal,omitempty"` + + // httpListenLocal when true, the Thanos sidecar listens on the loopback interface instead + // of the Pod IP's address for the HTTP endpoints. + // + // It has no effect if `listenLocal` is true. 
+ // +optional + HTTPListenLocal bool `json:"httpListenLocal,omitempty"` + + // tracingConfig defines the tracing configuration for the Thanos sidecar. + // + // `tracingConfigFile` takes precedence over this field. + // + // More info: https://thanos.io/tip/thanos/tracing.md/ + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + // +optional + TracingConfig *v1.SecretKeySelector `json:"tracingConfig,omitempty"` + // tracingConfigFile defines the tracing configuration file for the Thanos sidecar. + // + // This field takes precedence over `tracingConfig`. + // + // More info: https://thanos.io/tip/thanos/tracing.md/ + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // +optional + TracingConfigFile string `json:"tracingConfigFile,omitempty"` + + // grpcServerTlsConfig defines the TLS parameters for the gRPC server providing the StoreAPI. + // + // Note: Currently only the `caFile`, `certFile`, and `keyFile` fields are supported. + // + // +optional + GRPCServerTLSConfig *TLSConfig `json:"grpcServerTlsConfig,omitempty"` + + // logLevel for the Thanos sidecar. + // +kubebuilder:validation:Enum="";debug;info;warn;error + // +optional + LogLevel string `json:"logLevel,omitempty"` + // logFormat for the Thanos sidecar. + // +kubebuilder:validation:Enum="";logfmt;json + // +optional + LogFormat string `json:"logFormat,omitempty"` + + // minTime defines the start of time range limit served by the Thanos sidecar's StoreAPI. + // The field's value should be a constant time in RFC3339 format or a time + // duration relative to current time, such as -1d or 2h45m. Valid duration + // units are ms, s, m, h, d, w, y. + // +optional + MinTime string `json:"minTime,omitempty"` + + // blockSize controls the size of TSDB blocks produced by Prometheus. + // The default value is 2h to match the upstream Prometheus defaults. 
+ // + // WARNING: Changing the block duration can impact the performance and + // efficiency of the entire Prometheus/Thanos stack due to how it interacts + // with memory and Thanos compactors. It is recommended to keep this value + // set to a multiple of 120 times your longest scrape or rule interval. For + // example, 30s * 120 = 1h. + // + // +kubebuilder:default:="2h" + // +optional + BlockDuration Duration `json:"blockSize,omitempty"` + + // readyTimeout defines the maximum time that the Thanos sidecar will wait for + // Prometheus to start. + // +optional + ReadyTimeout Duration `json:"readyTimeout,omitempty"` + // getConfigInterval defines how often to retrieve the Prometheus configuration. + // +optional + GetConfigInterval Duration `json:"getConfigInterval,omitempty"` + // getConfigTimeout defines the maximum time to wait when retrieving the Prometheus configuration. + // +optional + GetConfigTimeout Duration `json:"getConfigTimeout,omitempty"` + + // volumeMounts allows configuration of additional VolumeMounts for Thanos. + // VolumeMounts specified will be appended to other VolumeMounts in the + // 'thanos-sidecar' container. + // +optional + VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"` + + // additionalArgs allows setting additional arguments for the Thanos container. + // The arguments are passed as-is to the Thanos container which may cause issues + // if they are invalid or not supported the given Thanos version. + // In case of an argument conflict (e.g. an argument which is already set by the + // operator itself) or when providing an invalid argument, the reconciliation will + // fail and an error will be logged. + // +optional + AdditionalArgs []Argument `json:"additionalArgs,omitempty"` +} + +// RemoteWriteSpec defines the configuration to write samples from Prometheus +// to a remote endpoint. +// +k8s:openapi-gen=true +type RemoteWriteSpec struct { + // url defines the URL of the endpoint to send samples to. 
+ // +kubebuilder:validation:MinLength=1 + // +required + URL string `json:"url"` + + // name of the remote write queue, it must be unique if specified. The + // name is used in metrics and logging in order to differentiate queues. + // + // It requires Prometheus >= v2.15.0 or Thanos >= 0.24.0. + // + // +optional + Name *string `json:"name,omitempty"` + + // messageVersion defines the Remote Write message's version to use when writing to the endpoint. + // + // `Version1.0` corresponds to the `prometheus.WriteRequest` protobuf message introduced in Remote Write 1.0. + // `Version2.0` corresponds to the `io.prometheus.write.v2.Request` protobuf message introduced in Remote Write 2.0. + // + // When `Version2.0` is selected, Prometheus will automatically be + // configured to append the metadata of scraped metrics to the WAL. + // + // Before setting this field, consult with your remote storage provider + // what message version it supports. + // + // It requires Prometheus >= v2.54.0 or Thanos >= v0.37.0. + // + // +optional + MessageVersion *RemoteWriteMessageVersion `json:"messageVersion,omitempty"` + + // sendExemplars enables sending of exemplars over remote write. Note that + // exemplar-storage itself must be enabled using the `spec.enableFeatures` + // option for exemplars to be scraped in the first place. + // + // It requires Prometheus >= v2.27.0 or Thanos >= v0.24.0. + // + // +optional + SendExemplars *bool `json:"sendExemplars,omitempty"` + + // sendNativeHistograms enables sending of native histograms, also known as sparse histograms + // over remote write. + // + // It requires Prometheus >= v2.40.0 or Thanos >= v0.30.0. + // + // +optional + SendNativeHistograms *bool `json:"sendNativeHistograms,omitempty"` + + // remoteTimeout defines the timeout for requests to the remote write endpoint. 
+ // +optional + RemoteTimeout *Duration `json:"remoteTimeout,omitempty"` + + // headers defines the custom HTTP headers to be sent along with each remote write request. + // Be aware that headers that are set by Prometheus itself can't be overwritten. + // + // It requires Prometheus >= v2.25.0 or Thanos >= v0.24.0. + // + // +optional + Headers map[string]string `json:"headers,omitempty"` + + // writeRelabelConfigs defines the list of remote write relabel configurations. + // +optional + WriteRelabelConfigs []RelabelConfig `json:"writeRelabelConfigs,omitempty"` + + // oauth2 configuration for the URL. + // + // It requires Prometheus >= v2.27.0 or Thanos >= v0.24.0. + // + // Cannot be set at the same time as `sigv4`, `authorization`, `basicAuth`, or `azureAd`. + // +optional + OAuth2 *OAuth2 `json:"oauth2,omitempty"` + + // basicAuth configuration for the URL. + // + // Cannot be set at the same time as `sigv4`, `authorization`, `oauth2`, or `azureAd`. + // + // +optional + BasicAuth *BasicAuth `json:"basicAuth,omitempty"` + + // bearerTokenFile defines the file from which to read bearer token for the URL. + // + // Deprecated: this will be removed in a future release. Prefer using `authorization`. + // +optional + BearerTokenFile string `json:"bearerTokenFile,omitempty"` + + // authorization section for the URL. + // + // It requires Prometheus >= v2.26.0 or Thanos >= v0.24.0. + // + // Cannot be set at the same time as `sigv4`, `basicAuth`, `oauth2`, or `azureAd`. + // + // +optional + Authorization *Authorization `json:"authorization,omitempty"` + + // sigv4 defines the AWS's Signature Verification 4 for the URL. + // + // It requires Prometheus >= v2.26.0 or Thanos >= v0.24.0. + // + // Cannot be set at the same time as `authorization`, `basicAuth`, `oauth2`, or `azureAd`. + // + // +optional + Sigv4 *Sigv4 `json:"sigv4,omitempty"` + + // azureAd for the URL. + // + // It requires Prometheus >= v2.45.0 or Thanos >= v0.31.0. 
+ // + // Cannot be set at the same time as `authorization`, `basicAuth`, `oauth2`, or `sigv4`. + // + // +optional + AzureAD *AzureAD `json:"azureAd,omitempty"` + + // bearerToken is deprecated: this will be removed in a future release. + // *Warning: this field shouldn't be used because the token value appears + // in clear-text. Prefer using `authorization`.* + // + // +optional + BearerToken string `json:"bearerToken,omitempty"` + + // tlsConfig to use for the URL. + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` + + // Optional ProxyConfig. + // +optional + ProxyConfig `json:",inline"` + + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // + // It requires Prometheus >= v2.26.0 or Thanos >= v0.24.0. + // + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` + + // queueConfig allows tuning of the remote write queue parameters. + // +optional + QueueConfig *QueueConfig `json:"queueConfig,omitempty"` + + // metadataConfig defines how to send a series metadata to the remote storage. + // +optional + MetadataConfig *MetadataConfig `json:"metadataConfig,omitempty"` + + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHttp2 *bool `json:"enableHTTP2,omitempty"` + + // roundRobinDNS controls the DNS resolution behavior for remote-write connections. + // When enabled: + // - The remote-write mechanism will resolve the hostname via DNS. + // - It will randomly select one of the resolved IP addresses and connect to it. + // + // When disabled (default behavior): + // - The Go standard library will handle hostname resolution. + // - It will attempt connections to each resolved IP address sequentially. + // + // Note: The connection timeout applies to the entire resolution and connection process. + // + // If disabled, the timeout is distributed across all connection attempts. + // + // It requires Prometheus >= v3.1.0 or Thanos >= v0.38.0. 
+ // + // +optional + RoundRobinDNS *bool `json:"roundRobinDNS,omitempty"` +} + +// +kubebuilder:validation:Enum=V1.0;V2.0 +type RemoteWriteMessageVersion string + +const ( + // Remote Write message's version 1.0. + RemoteWriteMessageVersion1_0 = RemoteWriteMessageVersion("V1.0") + // Remote Write message's version 2.0. + RemoteWriteMessageVersion2_0 = RemoteWriteMessageVersion("V2.0") +) + +// QueueConfig allows the tuning of remote write's queue_config parameters. +// This object is referenced in the RemoteWriteSpec object. +// +k8s:openapi-gen=true +type QueueConfig struct { + // capacity defines the number of samples to buffer per shard before we start + // dropping them. + // +optional + Capacity int `json:"capacity,omitempty"` + // minShards defines the minimum number of shards, i.e. amount of concurrency. + // +optional + MinShards int `json:"minShards,omitempty"` + // maxShards defines the maximum number of shards, i.e. amount of concurrency. + // +optional + MaxShards int `json:"maxShards,omitempty"` + // maxSamplesPerSend defines the maximum number of samples per send. + // +optional + MaxSamplesPerSend int `json:"maxSamplesPerSend,omitempty"` + // batchSendDeadline defines the maximum time a sample will wait in buffer. + // +optional + BatchSendDeadline *Duration `json:"batchSendDeadline,omitempty"` + // maxRetries defines the maximum number of times to retry a batch on recoverable errors. + // +optional + MaxRetries int `json:"maxRetries,omitempty"` + // minBackoff defines the initial retry delay. Gets doubled for every retry. + // +optional + MinBackoff *Duration `json:"minBackoff,omitempty"` + // maxBackoff defines the maximum retry delay. + // +optional + MaxBackoff *Duration `json:"maxBackoff,omitempty"` + // retryOnRateLimit defines the retry upon receiving a 429 status code from the remote-write storage. + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. 
+ // +optional + RetryOnRateLimit bool `json:"retryOnRateLimit,omitempty"` + // sampleAgeLimit drops samples older than the limit. + // It requires Prometheus >= v2.50.0 or Thanos >= v0.32.0. + // + // +optional + SampleAgeLimit *Duration `json:"sampleAgeLimit,omitempty"` +} + +// Sigv4 defines AWS's Signature Verification 4 signing process to +// sign requests. +// +k8s:openapi-gen=true +type Sigv4 struct { + // region defines the AWS region. If blank, the region from the default credentials chain used. + // +optional + Region string `json:"region,omitempty"` + // accessKey defines the AWS API key. If not specified, the environment variable + // `AWS_ACCESS_KEY_ID` is used. + // +optional + AccessKey *v1.SecretKeySelector `json:"accessKey,omitempty"` + // secretKey defines the AWS API secret. If not specified, the environment + // variable `AWS_SECRET_ACCESS_KEY` is used. + // +optional + SecretKey *v1.SecretKeySelector `json:"secretKey,omitempty"` + // profile defines the named AWS profile used to authenticate. + // +optional + Profile string `json:"profile,omitempty"` + // roleArn defines the named AWS profile used to authenticate. + // +optional + RoleArn string `json:"roleArn,omitempty"` + // useFIPSSTSEndpoint defines FIPS mode for AWS STS endpoint. + // It requires Prometheus >= v2.54.0. + // + // +optional + UseFIPSSTSEndpoint *bool `json:"useFIPSSTSEndpoint,omitempty"` +} + +// AzureAD defines the configuration for remote write's azuread parameters. +// +k8s:openapi-gen=true +type AzureAD struct { + // cloud defines the Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'. + // +kubebuilder:validation:Enum=AzureChina;AzureGovernment;AzurePublic + // +optional + Cloud *string `json:"cloud,omitempty"` + // managedIdentity defines the Azure User-assigned Managed identity. + // Cannot be set at the same time as `oauth` or `sdk`. 
+ // +optional + ManagedIdentity *ManagedIdentity `json:"managedIdentity,omitempty"` + // oauth defines the oauth config that is being used to authenticate. + // Cannot be set at the same time as `managedIdentity` or `sdk`. + // + // It requires Prometheus >= v2.48.0 or Thanos >= v0.31.0. + // + // +optional + OAuth *AzureOAuth `json:"oauth,omitempty"` + // sdk defines the Azure SDK config that is being used to authenticate. + // See https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication + // Cannot be set at the same time as `oauth` or `managedIdentity`. + // + // It requires Prometheus >= v2.52.0 or Thanos >= v0.36.0. + // +optional + SDK *AzureSDK `json:"sdk,omitempty"` +} + +// AzureOAuth defines the Azure OAuth settings. +// +k8s:openapi-gen=true +type AzureOAuth struct { + // clientId defines the clientId of the Azure Active Directory application that is being used to authenticate. + // +required + // +kubebuilder:validation:MinLength=1 + ClientID string `json:"clientId"` + // clientSecret specifies a key of a Secret containing the client secret of the Azure Active Directory application that is being used to authenticate. + // +required + ClientSecret v1.SecretKeySelector `json:"clientSecret"` + // tenantId is the tenant ID of the Azure Active Directory application that is being used to authenticate. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Pattern:=^[0-9a-zA-Z-.]+$ + TenantID string `json:"tenantId"` +} + +// ManagedIdentity defines the Azure User-assigned Managed identity. +// +k8s:openapi-gen=true +type ManagedIdentity struct { + // clientId defines defines the Azure User-assigned Managed identity. + // +required + ClientID string `json:"clientId"` +} + +// AzureSDK is used to store azure SDK config values. +type AzureSDK struct { + // tenantId defines the tenant ID of the azure active directory application that is being used to authenticate. 
+ // +optional + // +kubebuilder:validation:Pattern:=^[0-9a-zA-Z-.]+$ + TenantID *string `json:"tenantId,omitempty"` +} + +// RemoteReadSpec defines the configuration for Prometheus to read back samples +// from a remote endpoint. +// +k8s:openapi-gen=true +type RemoteReadSpec struct { + // url defines the URL of the endpoint to query from. + // +required + URL string `json:"url"` + + // name of the remote read queue, it must be unique if specified. The + // name is used in metrics and logging in order to differentiate read + // configurations. + // + // It requires Prometheus >= v2.15.0. + // + // +optional + Name string `json:"name,omitempty"` + + // requiredMatchers defines an optional list of equality matchers which have to be present + // in a selector to query the remote read endpoint. + // +optional + RequiredMatchers map[string]string `json:"requiredMatchers,omitempty"` + + // remoteTimeout defines the timeout for requests to the remote read endpoint. + // +optional + RemoteTimeout *Duration `json:"remoteTimeout,omitempty"` + + // headers defines the custom HTTP headers to be sent along with each remote read request. + // Be aware that headers that are set by Prometheus itself can't be overwritten. + // Only valid in Prometheus versions 2.26.0 and newer. + // +optional + Headers map[string]string `json:"headers,omitempty"` + + // readRecent defines whether reads should be made for queries for time ranges that + // the local storage should have complete data for. + // +optional + ReadRecent bool `json:"readRecent,omitempty"` + + // oauth2 configuration for the URL. + // + // It requires Prometheus >= v2.27.0. + // + // Cannot be set at the same time as `authorization`, or `basicAuth`. + // + // +optional + OAuth2 *OAuth2 `json:"oauth2,omitempty"` + // basicAuth configuration for the URL. + // + // Cannot be set at the same time as `authorization`, or `oauth2`. 
+ // + // +optional + BasicAuth *BasicAuth `json:"basicAuth,omitempty"` + // bearerTokenFile defines the file from which to read the bearer token for the URL. + // + // Deprecated: this will be removed in a future release. Prefer using `authorization`. + // +optional + BearerTokenFile string `json:"bearerTokenFile,omitempty"` + // authorization section for the URL. + // + // It requires Prometheus >= v2.26.0. + // + // Cannot be set at the same time as `basicAuth`, or `oauth2`. + // + // +optional + Authorization *Authorization `json:"authorization,omitempty"` + + // bearerToken is deprecated: this will be removed in a future release. + // *Warning: this field shouldn't be used because the token value appears + // in clear-text. Prefer using `authorization`.* + // + // +optional + BearerToken string `json:"bearerToken,omitempty"` + + // tlsConfig to use for the URL. + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` + + // Optional ProxyConfig. + // +optional + ProxyConfig `json:",inline"` + + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // + // It requires Prometheus >= v2.26.0. + // + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` + + // filterExternalLabels defines whether to use the external labels as selectors for the remote read endpoint. + // + // It requires Prometheus >= v2.34.0. + // + // +optional + FilterExternalLabels *bool `json:"filterExternalLabels,omitempty"` +} + +// RelabelConfig allows dynamic rewriting of the label set for targets, alerts, +// scraped samples and remote write samples. +// +// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config +// +// +k8s:openapi-gen=true +type RelabelConfig struct { + // sourceLabels defines the source labels select values from existing labels. Their content is + // concatenated using the configured Separator and matched against the + // configured regular expression. 
+ // + // +optional + SourceLabels []LabelName `json:"sourceLabels,omitempty"` + + // separator defines the string between concatenated SourceLabels. + // +optional + Separator *string `json:"separator,omitempty"` + + // targetLabel defines the label to which the resulting string is written in a replacement. + // + // It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + // `KeepEqual` and `DropEqual` actions. + // + // Regex capture groups are available. + // +optional + TargetLabel string `json:"targetLabel,omitempty"` + + // regex defines the regular expression against which the extracted value is matched. + // +optional + Regex string `json:"regex,omitempty"` + + // modulus to take of the hash of the source label values. + // + // Only applicable when the action is `HashMod`. + // +optional + Modulus uint64 `json:"modulus,omitempty"` + + // replacement value against which a Replace action is performed if the + // regular expression matches. + // + // Regex capture groups are available. + // + // +optional + Replacement *string `json:"replacement,omitempty"` + + // action to perform based on the regex matching. + // + // `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + // `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + // + // Default: "Replace" + // + // +kubebuilder:validation:Enum=replace;Replace;keep;Keep;drop;Drop;hashmod;HashMod;labelmap;LabelMap;labeldrop;LabelDrop;labelkeep;LabelKeep;lowercase;Lowercase;uppercase;Uppercase;keepequal;KeepEqual;dropequal;DropEqual + // +kubebuilder:default=replace + // +optional + Action string `json:"action,omitempty"` +} + +// APIServerConfig defines how the Prometheus server connects to the Kubernetes API server. 
+// +// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config +// +// +k8s:openapi-gen=true +type APIServerConfig struct { + // host defines the Kubernetes API address consisting of a hostname or IP address followed + // by an optional port number. + // +required + Host string `json:"host"` + + // basicAuth configuration for the API server. + // + // Cannot be set at the same time as `authorization`, `bearerToken`, or + // `bearerTokenFile`. + // + // +optional + BasicAuth *BasicAuth `json:"basicAuth,omitempty"` + + // bearerTokenFile defines the file to read bearer token for accessing apiserver. + // + // Cannot be set at the same time as `basicAuth`, `authorization`, or `bearerToken`. + // + // Deprecated: this will be removed in a future release. Prefer using `authorization`. + // +optional + BearerTokenFile string `json:"bearerTokenFile,omitempty"` + + // tlsConfig to use for the API server. + // + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` + + // authorization section for the API server. + // + // Cannot be set at the same time as `basicAuth`, `bearerToken`, or + // `bearerTokenFile`. + // + // +optional + Authorization *Authorization `json:"authorization,omitempty"` + + // bearerToken is deprecated: this will be removed in a future release. + // *Warning: this field shouldn't be used because the token value appears + // in clear-text. Prefer using `authorization`.* + // + // +optional + BearerToken string `json:"bearerToken,omitempty"` + + // Optional ProxyConfig. + // +optional + ProxyConfig `json:",inline"` +} + +// +kubebuilder:validation:Enum=v1;V1;v2;V2 +type AlertmanagerAPIVersion string + +const ( + AlertmanagerAPIVersion1 = AlertmanagerAPIVersion("V1") + AlertmanagerAPIVersion2 = AlertmanagerAPIVersion("V2") +) + +// AlertmanagerEndpoints defines a selection of a single Endpoints object +// containing Alertmanager IPs to fire alerts against. 
+// +k8s:openapi-gen=true +type AlertmanagerEndpoints struct { + // namespace of the Endpoints object. + // + // If not set, the object will be discovered in the namespace of the + // Prometheus object. + // + // +kubebuilder:validation:MinLength:=1 + // +optional + Namespace *string `json:"namespace,omitempty"` + + // name of the Endpoints object in the namespace. + // + // +kubebuilder:validation:MinLength:=1 + // +required + Name string `json:"name"` + + // port on which the Alertmanager API is exposed. + // +required + Port intstr.IntOrString `json:"port"` + + // scheme to use when firing alerts. + // +optional + Scheme string `json:"scheme,omitempty"` + + // pathPrefix defines the prefix for the HTTP path alerts are pushed to. + // +optional + PathPrefix string `json:"pathPrefix,omitempty"` + + // tlsConfig to use for Alertmanager. + // + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` + + // basicAuth configuration for Alertmanager. + // + // Cannot be set at the same time as `bearerTokenFile`, `authorization` or `sigv4`. + // + // +optional + BasicAuth *BasicAuth `json:"basicAuth,omitempty"` + + // bearerTokenFile defines the file to read bearer token for Alertmanager. + // + // Cannot be set at the same time as `basicAuth`, `authorization`, or `sigv4`. + // + // Deprecated: this will be removed in a future release. Prefer using `authorization`. + // +optional + BearerTokenFile string `json:"bearerTokenFile,omitempty"` + + // authorization section for Alertmanager. + // + // Cannot be set at the same time as `basicAuth`, `bearerTokenFile` or `sigv4`. + // + // +optional + Authorization *SafeAuthorization `json:"authorization,omitempty"` + + // sigv4 defines AWS's Signature Verification 4 for the URL. + // + // It requires Prometheus >= v2.48.0. + // + // Cannot be set at the same time as `basicAuth`, `bearerTokenFile` or `authorization`. 
+ // + // +optional + Sigv4 *Sigv4 `json:"sigv4,omitempty"` + + // ProxyConfig + // +optional + ProxyConfig `json:",inline"` + + // apiVersion defines the version of the Alertmanager API that Prometheus uses to send alerts. + // It can be "V1" or "V2". + // The field has no effect for Prometheus >= v3.0.0 because only the v2 API is supported. + // + // +optional + APIVersion *AlertmanagerAPIVersion `json:"apiVersion,omitempty"` + + // timeout defines a per-target Alertmanager timeout when pushing alerts. + // + // +optional + Timeout *Duration `json:"timeout,omitempty"` + + // enableHttp2 defines whether to enable HTTP2. + // + // +optional + EnableHttp2 *bool `json:"enableHttp2,omitempty"` + + // relabelings defines the relabel configuration applied to the discovered Alertmanagers. + // + // +optional + RelabelConfigs []RelabelConfig `json:"relabelings,omitempty"` + + // alertRelabelings defines the relabeling configs applied before sending alerts to a specific Alertmanager. + // It requires Prometheus >= v2.51.0. + // + // +optional + AlertRelabelConfigs []RelabelConfig `json:"alertRelabelings,omitempty"` +} + +// +k8s:openapi-gen=true +type Rules struct { + // alert defines the parameters of the Prometheus rules' engine. + // + // Any update to these parameters trigger a restart of the pods. + // +optional + Alert RulesAlert `json:"alert,omitempty"` +} + +// +k8s:openapi-gen=true +type RulesAlert struct { + // forOutageTolerance defines the max time to tolerate prometheus outage for restoring 'for' state of + // alert. + // +optional + ForOutageTolerance string `json:"forOutageTolerance,omitempty"` + + // forGracePeriod defines the minimum duration between alert and restored 'for' state. + // + // This is maintained only for alerts with a configured 'for' time greater + // than the grace period. 
+ // +optional + ForGracePeriod string `json:"forGracePeriod,omitempty"` + + // resendDelay defines the minimum amount of time to wait before resending an alert to + // Alertmanager. + // +optional + ResendDelay string `json:"resendDelay,omitempty"` +} + +// MetadataConfig configures the sending of series metadata to the remote storage. +// +// +k8s:openapi-gen=true +type MetadataConfig struct { + // send defines whether metric metadata is sent to the remote storage or not. + // +optional + Send bool `json:"send,omitempty"` + + // sendInterval defines how frequently metric metadata is sent to the remote storage. + // +optional + SendInterval Duration `json:"sendInterval,omitempty"` + + // maxSamplesPerSend defines the maximum number of metadata samples per send. + // + // It requires Prometheus >= v2.29.0. + // + // +optional + // +kubebuilder:validation:Minimum=-1 + MaxSamplesPerSend *int32 `json:"maxSamplesPerSend,omitempty"` +} + +type ShardStatus struct { + // shardID defines the identifier of the shard. + // +required + ShardID string `json:"shardID"` + // replicas defines the total number of pods targeted by this shard. + // +required + Replicas int32 `json:"replicas"` + // updatedReplicas defines the total number of non-terminated pods targeted by this shard + // that have the desired spec. + // +required + UpdatedReplicas int32 `json:"updatedReplicas"` + // availableReplicas defines the total number of available pods (ready for at least minReadySeconds) + // targeted by this shard. + // +required + AvailableReplicas int32 `json:"availableReplicas"` + // unavailableReplicas defines the Total number of unavailable pods targeted by this shard. + // +required + UnavailableReplicas int32 `json:"unavailableReplicas"` +} + +type TSDBSpec struct { + // outOfOrderTimeWindow defines how old an out-of-order/out-of-bounds sample can be with + // respect to the TSDB max time. 
+ // + // An out-of-order/out-of-bounds sample is ingested into the TSDB as long as + // the timestamp of the sample is >= (TSDB.MaxTime - outOfOrderTimeWindow). + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + // It requires Prometheus >= v2.39.0 or PrometheusAgent >= v2.54.0. + // +optional + OutOfOrderTimeWindow *Duration `json:"outOfOrderTimeWindow,omitempty"` +} + +type Exemplars struct { + // maxSize defines the maximum number of exemplars stored in memory for all series. + // + // exemplar-storage itself must be enabled using the `spec.enableFeature` + // option for exemplars to be scraped in the first place. + // + // If not set, Prometheus uses its default value. A value of zero or less + // than zero disables the storage. + // + // +optional + MaxSize *int64 `json:"maxSize,omitempty"` +} + +// SafeAuthorization specifies a subset of the Authorization struct, that is +// safe for use because it doesn't provide access to the Prometheus container's +// filesystem. +// +// +k8s:openapi-gen=true +type SafeAuthorization struct { + // type defines the authentication type. The value is case-insensitive. + // + // "Basic" is not a supported value. + // + // Default: "Bearer" + // +optional + Type string `json:"type,omitempty"` + + // credentials defines a key of a Secret in the namespace that contains the credentials for authentication. + // +optional + Credentials *v1.SecretKeySelector `json:"credentials,omitempty"` +} + +// Validate semantically validates the given Authorization section. 
+func (c *SafeAuthorization) Validate() error { + if c == nil { + return nil + } + + if strings.ToLower(strings.TrimSpace(c.Type)) == "basic" { + return errors.New("authorization type cannot be set to \"basic\", use \"basicAuth\" instead") + } + + if c.Credentials == nil { + return errors.New("authorization credentials are required") + } + + return nil +} + +type Authorization struct { + // +optional + SafeAuthorization `json:",inline"` + + // credentialsFile defines the file to read a secret from, mutually exclusive with `credentials`. + // +optional + CredentialsFile string `json:"credentialsFile,omitempty"` +} + +// Validate semantically validates the given Authorization section. +func (c *Authorization) Validate() error { + if c == nil { + return nil + } + + if c.Credentials != nil && c.CredentialsFile != "" { + return errors.New("authorization can not specify both \"credentials\" and \"credentialsFile\"") + } + + if strings.ToLower(strings.TrimSpace(c.Type)) == "basic" { + return errors.New("authorization type cannot be set to \"basic\", use \"basicAuth\" instead") + } + + return nil +} + +type ScrapeClass struct { + // name of the scrape class. + // + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + + // default defines that the scrape applies to all scrape objects that + // don't configure an explicit scrape class name. + // + // Only one scrape class can be set as the default. + // + // +optional + Default *bool `json:"default,omitempty"` + + // fallbackScrapeProtocol defines the protocol to use if a scrape returns blank, unparseable, or otherwise invalid Content-Type. + // It will only apply if the scrape resource doesn't specify any FallbackScrapeProtocol + // + // It requires Prometheus >= v3.0.0. + // +optional + FallbackScrapeProtocol *ScrapeProtocol `json:"fallbackScrapeProtocol,omitempty"` + + // tlsConfig defines the TLS settings to use for the scrape. 
When the + // scrape objects define their own CA, certificate and/or key, they take + // precedence over the corresponding scrape class fields. + // + // For now only the `caFile`, `certFile` and `keyFile` fields are supported. + // + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` + + // authorization section for the ScrapeClass. + // It will only apply if the scrape resource doesn't specify any Authorization. + // +optional + Authorization *Authorization `json:"authorization,omitempty"` + + // relabelings defines the relabeling rules to apply to all scrape targets. + // + // The Operator automatically adds relabelings for a few standard Kubernetes fields + // like `__meta_kubernetes_namespace` and `__meta_kubernetes_service_name`. + // Then the Operator adds the scrape class relabelings defined here. + // Then the Operator adds the target-specific relabelings defined in the scrape object. + // + // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + // + // +optional + Relabelings []RelabelConfig `json:"relabelings,omitempty"` + + // metricRelabelings defines the relabeling rules to apply to all samples before ingestion. + // + // The Operator adds the scrape class metric relabelings defined here. + // Then the Operator adds the target-specific metric relabelings defined in ServiceMonitors, PodMonitors, Probes and ScrapeConfigs. + // Then the Operator adds namespace enforcement relabeling rule, specified in '.spec.enforcedNamespaceLabel'. + // + // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs + // + // +optional + MetricRelabelings []RelabelConfig `json:"metricRelabelings,omitempty"` + + // attachMetadata defines additional metadata to the discovered targets. + // When the scrape object defines its own configuration, it takes + // precedence over the scrape class configuration. 
+ // + // +optional + AttachMetadata *AttachMetadata `json:"attachMetadata,omitempty"` +} + +// TranslationStrategyOption represents a translation strategy option for the OTLP endpoint. +// Supported values are: +// * `NoUTF8EscapingWithSuffixes` +// * `UnderscoreEscapingWithSuffixes` +// * `UnderscoreEscapingWithoutSuffixes` +// * `NoTranslation` +// +kubebuilder:validation:Enum=NoUTF8EscapingWithSuffixes;UnderscoreEscapingWithSuffixes;NoTranslation;UnderscoreEscapingWithoutSuffixes +type TranslationStrategyOption string + +const ( + NoUTF8EscapingWithSuffixes TranslationStrategyOption = "NoUTF8EscapingWithSuffixes" + UnderscoreEscapingWithSuffixes TranslationStrategyOption = "UnderscoreEscapingWithSuffixes" + // It requires Prometheus >= v3.4.0. + NoTranslation TranslationStrategyOption = "NoTranslation" + // It requires Prometheus >= v3.6.0. + UnderscoreEscapingWithoutSuffixes TranslationStrategyOption = "UnderscoreEscapingWithoutSuffixes" +) + +// OTLPConfig is the configuration for writing to the OTLP endpoint. +// +// +k8s:openapi-gen=true +type OTLPConfig struct { + // promoteAllResourceAttributes promotes all resource attributes to metric labels except the ones defined in `ignoreResourceAttributes`. + // + // Cannot be true when `promoteResourceAttributes` is defined. + // It requires Prometheus >= v3.5.0. + // +optional + PromoteAllResourceAttributes *bool `json:"promoteAllResourceAttributes,omitempty"` + + // ignoreResourceAttributes defines the list of OpenTelemetry resource attributes to ignore when `promoteAllResourceAttributes` is true. + // + // It requires `promoteAllResourceAttributes` to be true. + // It requires Prometheus >= v3.5.0. 
+ // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:items:MinLength=1 + // +listType=set + // +optional + IgnoreResourceAttributes []string `json:"ignoreResourceAttributes,omitempty"` + + // promoteResourceAttributes defines the list of OpenTelemetry Attributes that should be promoted to metric labels, defaults to none. + // Cannot be defined when `promoteAllResourceAttributes` is true. + // + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:items:MinLength=1 + // +listType=set + // +optional + PromoteResourceAttributes []string `json:"promoteResourceAttributes,omitempty"` + + // translationStrategy defines how the OTLP receiver endpoint translates the incoming metrics. + // + // It requires Prometheus >= v3.0.0. + // +optional + TranslationStrategy *TranslationStrategyOption `json:"translationStrategy,omitempty"` + + // keepIdentifyingResourceAttributes enables adding `service.name`, `service.namespace` and `service.instance.id` + // resource attributes to the `target_info` metric, on top of converting them into the `instance` and `job` labels. + // + // It requires Prometheus >= v3.1.0. + // +optional + KeepIdentifyingResourceAttributes *bool `json:"keepIdentifyingResourceAttributes,omitempty"` + + // convertHistogramsToNHCB defines optional translation of OTLP explicit bucket histograms into native histograms with custom buckets. + // It requires Prometheus >= v3.4.0. + // +optional + ConvertHistogramsToNHCB *bool `json:"convertHistogramsToNHCB,omitempty"` + + // promoteScopeMetadata controls whether to promote OpenTelemetry scope metadata (i.e. name, version, schema URL, and attributes) to metric labels. + // As per the OpenTelemetry specification, the aforementioned scope metadata should be identifying, i.e. made into metric labels. + // It requires Prometheus >= v3.6.0. + // +optional + PromoteScopeMetadata *bool `json:"promoteScopeMetadata,omitempty"` +} + +// Validate semantically validates the given OTLPConfig section. 
+func (c *OTLPConfig) Validate() error { + if c == nil { + return nil + } + + if len(c.PromoteResourceAttributes) > 0 && c.PromoteAllResourceAttributes != nil && *c.PromoteAllResourceAttributes { + return fmt.Errorf("'promoteAllResourceAttributes' cannot be set to 'true' simultaneously with 'promoteResourceAttributes'") + } + + if len(c.IgnoreResourceAttributes) > 0 && (c.PromoteAllResourceAttributes == nil || !*c.PromoteAllResourceAttributes) { + return fmt.Errorf("'ignoreResourceAttributes' can only be set when 'promoteAllResourceAttributes' is true") + } + + return nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheusrule_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheusrule_types.go new file mode 100644 index 0000000000..1534ee4ba5 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheusrule_types.go @@ -0,0 +1,163 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + PrometheusRuleKind = "PrometheusRule" + PrometheusRuleName = "prometheusrules" + PrometheusRuleKindKey = "prometheusrule" +) + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:resource:categories="prometheus-operator",shortName="promrule" +// +kubebuilder:subresource:status + +// The `PrometheusRule` custom resource definition (CRD) defines [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) and [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) rules to be evaluated by `Prometheus` or `ThanosRuler` objects. +// +// `Prometheus` and `ThanosRuler` objects select `PrometheusRule` objects using label and namespace selectors. +type PrometheusRule struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ObjectMeta as the metadata that all persisted resources. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec defines the specification of desired alerting rule definitions for Prometheus. + // +required + Spec PrometheusRuleSpec `json:"spec"` + // status defines the status subresource. It is under active development and is updated only when the + // "StatusForConfigurationResources" feature gate is enabled. + // + // Most recent observed status of the PrometheusRule. Read-only. + // More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ConfigResourceStatus `json:"status,omitempty,omitzero"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (f *PrometheusRule) DeepCopyObject() runtime.Object { + return f.DeepCopy() +} + +// PrometheusRuleSpec contains specification parameters for a Rule. 
+// +k8s:openapi-gen=true +type PrometheusRuleSpec struct { + // groups defines the content of Prometheus rule file + // +listType=map + // +listMapKey=name + // +optional + Groups []RuleGroup `json:"groups,omitempty"` +} + +// RuleGroup and Rule are copied instead of vendored because the +// upstream Prometheus struct definitions don't have json struct tags. + +// RuleGroup is a list of sequentially evaluated recording and alerting rules. +// +k8s:openapi-gen=true +type RuleGroup struct { + // name defines the name of the rule group. + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + // labels define the labels to add or overwrite before storing the result for its rules. + // The labels defined at the rule level take precedence. + // + // It requires Prometheus >= 3.0.0. + // The field is ignored for Thanos Ruler. + // +optional + Labels map[string]string `json:"labels,omitempty"` + // interval defines how often rules in the group are evaluated. + // +optional + Interval *Duration `json:"interval,omitempty"` + // query_offset defines the offset the rule evaluation timestamp of this particular group by the specified duration into the past. + // + // It requires Prometheus >= v2.53.0. + // It is not supported for ThanosRuler. + // +optional + //nolint:kubeapilinter // The json tag doesn't meet the conventions to be compatible with Prometheus format. + QueryOffset *Duration `json:"query_offset,omitempty"` + // rules defines the list of alerting and recording rules. + // +optional + Rules []Rule `json:"rules,omitempty"` + // partial_response_strategy is only used by ThanosRuler and will + // be ignored by Prometheus instances. + // More info: https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md#partial-response + // +kubebuilder:validation:Pattern="^(?i)(abort|warn)?$" + // +optional + //nolint:kubeapilinter // The json tag doesn't meet the conventions to be compatible with Prometheus format. 
+ PartialResponseStrategy string `json:"partial_response_strategy,omitempty"` + // limit defines the number of alerts an alerting rule and series a recording + // rule can produce. + // Limit is supported starting with Prometheus >= 2.31 and Thanos Ruler >= 0.24. + // +optional + Limit *int `json:"limit,omitempty"` +} + +// Rule describes an alerting or recording rule +// See Prometheus documentation: [alerting](https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) or [recording](https://www.prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules) rule +// +k8s:openapi-gen=true +// +kubebuilder:validation:OneOf=Record,Alert +type Rule struct { + // record defines the name of the time series to output to. Must be a valid metric name. + // Only one of `record` and `alert` must be set. + // +optional + Record string `json:"record,omitempty"` + // alert defines the name of the alert. Must be a valid label value. + // Only one of `record` and `alert` must be set. + // +optional + Alert string `json:"alert,omitempty"` + // expr defines the PromQL expression to evaluate. + // +required + Expr intstr.IntOrString `json:"expr"` + // for defines how alerts are considered firing once they have been returned for this long. + // +optional + For *Duration `json:"for,omitempty"` + // keep_firing_for defines how long an alert will continue firing after the condition that triggered it has cleared. + // +optional + //nolint:kubeapilinter // The json tag doesn't meet the conventions to be compatible with Prometheus format. + KeepFiringFor *NonEmptyDuration `json:"keep_firing_for,omitempty"` + // labels defines labels to add or overwrite. + // +optional + Labels map[string]string `json:"labels,omitempty"` + // annotations defines annotations to add to each alert. + // Only valid for alerting rules. + // +optional + Annotations map[string]string `json:"annotations,omitempty"` +} + +// PrometheusRuleList is a list of PrometheusRules. 
+// +k8s:openapi-gen=true +type PrometheusRuleList struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ListMeta as metadata for collection responses. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // List of Rules + // +required + Items []PrometheusRule `json:"items"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *PrometheusRuleList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/register.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/register.go new file mode 100644 index 0000000000..37786147ab --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/register.go @@ -0,0 +1,67 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" +) + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: monitoring.GroupName, Version: Version} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Prometheus{}, + &PrometheusList{}, + &ServiceMonitor{}, + &ServiceMonitorList{}, + &PodMonitor{}, + &PodMonitorList{}, + &Probe{}, + &ProbeList{}, + &Alertmanager{}, + &AlertmanagerList{}, + &PrometheusRule{}, + &PrometheusRuleList{}, + &ThanosRuler{}, + &ThanosRulerList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/servicemonitor_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/servicemonitor_types.go new file mode 100644 index 0000000000..90bcd79974 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/servicemonitor_types.go @@ -0,0 +1,228 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + ServiceMonitorsKind = "ServiceMonitor" + ServiceMonitorName = "servicemonitors" + ServiceMonitorKindKey = "servicemonitor" +) + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:resource:categories="prometheus-operator",shortName="smon" +// +kubebuilder:subresource:status + +// The `ServiceMonitor` custom resource definition (CRD) defines how `Prometheus` and `PrometheusAgent` can scrape metrics from a group of services. +// Among other things, it allows to specify: +// * The services to scrape via label selectors. +// * The container ports to scrape. +// * Authentication credentials to use. +// * Target and metric relabeling. +// +// `Prometheus` and `PrometheusAgent` objects select `ServiceMonitor` objects using label and namespace selectors. +type ServiceMonitor struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ObjectMeta as the metadata that all persisted resources. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec defines the specification of desired Service selection for target discovery by + // Prometheus. + // +required + Spec ServiceMonitorSpec `json:"spec"` + // status defines the status subresource. It is under active development and is updated only when the + // "StatusForConfigurationResources" feature gate is enabled. + // + // Most recent observed status of the ServiceMonitor. Read-only. + // More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ConfigResourceStatus `json:"status,omitempty,omitzero"` +} + +// DeepCopyObject implements the runtime.Object interface. 
+func (l *ServiceMonitor) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +func (l *ServiceMonitor) Bindings() []WorkloadBinding { + return l.Status.Bindings +} + +// ServiceMonitorSpec defines the specification parameters for a ServiceMonitor. +// +k8s:openapi-gen=true +type ServiceMonitorSpec struct { + // jobLabel selects the label from the associated Kubernetes `Service` + // object which will be used as the `job` label for all metrics. + // + // For example if `jobLabel` is set to `foo` and the Kubernetes `Service` + // object is labeled with `foo: bar`, then Prometheus adds the `job="bar"` + // label to all ingested metrics. + // + // If the value of this field is empty or if the label doesn't exist for + // the given Service, the `job` label of the metrics defaults to the name + // of the associated Kubernetes `Service`. + // +optional + JobLabel string `json:"jobLabel,omitempty"` + + // targetLabels defines the labels which are transferred from the + // associated Kubernetes `Service` object onto the ingested metrics. + // + // +optional + TargetLabels []string `json:"targetLabels,omitempty"` + // podTargetLabels defines the labels which are transferred from the + // associated Kubernetes `Pod` object onto the ingested metrics. + // + // +optional + PodTargetLabels []string `json:"podTargetLabels,omitempty"` + + // endpoints defines the list of endpoints part of this ServiceMonitor. + // Defines how to scrape metrics from Kubernetes [Endpoints](https://kubernetes.io/docs/concepts/services-networking/service/#endpoints) objects. + // In most cases, an Endpoints object is backed by a Kubernetes [Service](https://kubernetes.io/docs/concepts/services-networking/service/) object with the same name and labels. + // +required + Endpoints []Endpoint `json:"endpoints"` + + // selector defines the label selector to select the Kubernetes `Endpoints` objects to scrape metrics from. 
+ // +required + Selector metav1.LabelSelector `json:"selector"` + + // selectorMechanism defines the mechanism used to select the endpoints to scrape. + // By default, the selection process relies on relabel configurations to filter the discovered targets. + // Alternatively, you can opt in for role selectors, which may offer better efficiency in large clusters. + // Which strategy is best for your use case needs to be carefully evaluated. + // + // It requires Prometheus >= v2.17.0. + // + // +optional + SelectorMechanism *SelectorMechanism `json:"selectorMechanism,omitempty"` + + // namespaceSelector defines in which namespace(s) Prometheus should discover the services. + // By default, the services are discovered in the same namespace as the `ServiceMonitor` object but it is possible to select pods across different/all namespaces. + // +optional + NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` + + // sampleLimit defines a per-scrape limit on the number of scraped samples + // that will be accepted. + // + // +optional + SampleLimit *uint64 `json:"sampleLimit,omitempty"` + + // scrapeProtocols defines the protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` + + // fallbackScrapeProtocol defines the protocol to use if a scrape returns blank, unparseable, or otherwise invalid Content-Type. + // + // It requires Prometheus >= v3.0.0. + // +optional + FallbackScrapeProtocol *ScrapeProtocol `json:"fallbackScrapeProtocol,omitempty"` + + // targetLimit defines a limit on the number of scraped targets that will + // be accepted. 
+ // + // +optional + TargetLimit *uint64 `json:"targetLimit,omitempty"` + + // labelLimit defines the per-scrape limit on number of labels that will be accepted for a sample. + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelLimit *uint64 `json:"labelLimit,omitempty"` + // labelNameLengthLimit defines the per-scrape limit on length of labels name that will be accepted for a sample. + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` + // labelValueLengthLimit defines the per-scrape limit on length of labels value that will be accepted for a sample. + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + + // +optional + NativeHistogramConfig `json:",inline"` + + // keepDroppedTargets defines the per-scrape limit on the number of targets dropped by relabeling + // that will be kept in memory. 0 means no limit. + // + // It requires Prometheus >= v2.47.0. + // + // +optional + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + + // attachMetadata defines additional metadata which is added to the + // discovered targets. + // + // It requires Prometheus >= v2.37.0. + // + // +optional + AttachMetadata *AttachMetadata `json:"attachMetadata,omitempty"` + + // scrapeClass defines the scrape class to apply. + // +optional + // +kubebuilder:validation:MinLength=1 + ScrapeClassName *string `json:"scrapeClass,omitempty"` + + // bodySizeLimit when defined, bodySizeLimit specifies a job level limit on the size + // of uncompressed response body that will be accepted by Prometheus. + // + // It requires Prometheus >= v2.28.0. + // + // +optional + BodySizeLimit *ByteSize `json:"bodySizeLimit,omitempty"` + + // serviceDiscoveryRole defines the service discovery role used to discover targets. + // + // If set, the value should be either "Endpoints" or "EndpointSlice". 
+ // Otherwise it defaults to the value defined in the + // Prometheus/PrometheusAgent resource. + // + // +optional + ServiceDiscoveryRole *ServiceDiscoveryRole `json:"serviceDiscoveryRole,omitempty"` +} + +// ServiceMonitorList is a list of ServiceMonitors. +// +k8s:openapi-gen=true +type ServiceMonitorList struct { + // TypeMeta defines the versioned schema of this representation of an object + metav1.TypeMeta `json:",inline"` + // metadata defines ListMeta as metadata for collection responses. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // List of ServiceMonitors + // +required + Items []ServiceMonitor `json:"items"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *ServiceMonitorList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go new file mode 100644 index 0000000000..434664c23b --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go @@ -0,0 +1,587 @@ +// Copyright 2020 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + ThanosRulerKind = "ThanosRuler" + ThanosRulerName = "thanosrulers" + ThanosRulerKindKey = "thanosrulers" +) + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:resource:categories="prometheus-operator",shortName="ruler" +// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version",description="The version of Thanos Ruler" +// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas",description="The number of desired replicas" +// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.availableReplicas",description="The number of ready replicas" +// +kubebuilder:printcolumn:name="Reconciled",type="string",JSONPath=".status.conditions[?(@.type == 'Reconciled')].status" +// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.conditions[?(@.type == 'Available')].status" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Paused",type="boolean",JSONPath=".status.paused",description="Whether the resource reconciliation is paused or not",priority=1 +// +kubebuilder:subresource:status + +// The `ThanosRuler` custom resource definition (CRD) defines a desired [Thanos Ruler](https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md) setup to run in a Kubernetes cluster. +// +// A `ThanosRuler` instance requires at least one compatible Prometheus API endpoint (either Thanos Querier or Prometheus services). +// +// The resource defines via label and namespace selectors which `PrometheusRule` objects should be associated to the deployed Thanos Ruler instances. +type ThanosRuler struct { + // TypeMeta defines the versioned schema of this representation of an object. 
+ metav1.TypeMeta `json:",inline"` + // metadata defines ObjectMeta as the metadata that all persisted resources. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec defines the specification of the desired behavior of the ThanosRuler cluster. More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +required + Spec ThanosRulerSpec `json:"spec"` + // status defines the most recent observed status of the ThanosRuler cluster. Read-only. + // More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ThanosRulerStatus `json:"status,omitempty"` +} + +// ThanosRulerList is a list of ThanosRulers. +// +k8s:openapi-gen=true +type ThanosRulerList struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ListMeta as metadata for collection responses. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // List of Prometheuses + // +required + Items []ThanosRuler `json:"items"` +} + +// ThanosRulerSpec is a specification of the desired behavior of the ThanosRuler. More info: +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +// +k8s:openapi-gen=true +type ThanosRulerSpec struct { + // version of Thanos to be deployed. + // +optional + Version *string `json:"version,omitempty"` + + // podMetadata defines labels and annotations which are propagated to the ThanosRuler pods. + // + // The following items are reserved and cannot be overridden: + // * "app.kubernetes.io/name" label, set to "thanos-ruler". + // * "app.kubernetes.io/managed-by" label, set to "prometheus-operator". + // * "app.kubernetes.io/instance" label, set to the name of the ThanosRuler instance. 
+ // * "thanos-ruler" label, set to the name of the ThanosRuler instance. + // * "kubectl.kubernetes.io/default-container" annotation, set to "thanos-ruler". + // +optional + PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` + + // image defines Thanos container image URL. + // +optional + Image string `json:"image,omitempty"` + + // imagePullPolicy defines for the 'thanos', 'init-config-reloader' and 'config-reloader' containers. + // See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy for more details. + // +kubebuilder:validation:Enum="";Always;Never;IfNotPresent + // +optional + ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"` + + // imagePullSecrets defines an optional list of references to secrets in the same namespace + // to use for pulling thanos images from registries + // see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + // +optional + ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + + // paused defines when a ThanosRuler deployment is paused, no actions except for deletion + // will be performed on the underlying objects. + // +optional + Paused bool `json:"paused,omitempty"` + + // replicas defines the number of thanos ruler instances to deploy. + // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // nodeSelector defines which Nodes the Pods are scheduled on. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // resources defines the resource requirements for single Pods. + // If not provided, no requests/limits will be set + // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` + + // affinity defines when specified, the pod's scheduling constraints. + // +optional + Affinity *v1.Affinity `json:"affinity,omitempty"` + + // tolerations defines when specified, the pod's tolerations. 
+ // +optional + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + + // topologySpreadConstraints defines the pod's topology spread constraints. + // +optional + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + + // securityContext defines the pod-level security attributes and common container settings. + // This defaults to the default PodSecurityContext. + // +optional + SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` + + // dnsPolicy defines the DNS policy for the pods. + // + // +optional + DNSPolicy *DNSPolicy `json:"dnsPolicy,omitempty"` + // dnsConfig defines Defines the DNS configuration for the pods. + // + // +optional + DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty"` + + // enableServiceLinks defines whether information about services should be injected into pod's environment variables + // +optional + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"` + + // priorityClassName defines the priority class assigned to the Pods + // +optional + PriorityClassName string `json:"priorityClassName,omitempty"` + + // serviceName defines the name of the service name used by the underlying StatefulSet(s) as the governing service. + // If defined, the Service must be created before the ThanosRuler resource in the same namespace and it must define a selector that matches the pod labels. + // If empty, the operator will create and manage a headless service named `thanos-ruler-operated` for ThanosRuler resources. + // When deploying multiple ThanosRuler resources in the same namespace, it is recommended to specify a different value for each. + // See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-network-id for more details. 
+ // +optional + // +kubebuilder:validation:MinLength=1 + ServiceName *string `json:"serviceName,omitempty"` + + // serviceAccountName defines the name of the ServiceAccount to use to run the + // Thanos Ruler Pods. + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // storage defines the specification of how storage shall be used. + // +optional + Storage *StorageSpec `json:"storage,omitempty"` + + // volumes defines how configuration of additional volumes on the output StatefulSet definition. Volumes specified will + // be appended to other volumes that are generated as a result of StorageSpec objects. + // +optional + Volumes []v1.Volume `json:"volumes,omitempty"` + // volumeMounts defines how the configuration of additional VolumeMounts on the output StatefulSet definition. + // VolumeMounts specified will be appended to other VolumeMounts in the ruler container, + // that are generated as a result of StorageSpec objects. + // +optional + VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"` + + // objectStorageConfig defines the configuration format is defined at https://thanos.io/tip/thanos/storage.md/#configuring-access-to-object-storage + // + // The operator performs no validation of the configuration. + // + // `objectStorageConfigFile` takes precedence over this field. + // + // +optional + ObjectStorageConfig *v1.SecretKeySelector `json:"objectStorageConfig,omitempty"` + // objectStorageConfigFile defines the path of the object storage configuration file. + // + // The configuration format is defined at https://thanos.io/tip/thanos/storage.md/#configuring-access-to-object-storage + // + // The operator performs no validation of the configuration file. + // + // This field takes precedence over `objectStorageConfig`. 
+ // + // +optional + ObjectStorageConfigFile *string `json:"objectStorageConfigFile,omitempty"` + + // listenLocal defines the Thanos ruler listen on loopback, so that it + // does not bind against the Pod IP. + // +optional + ListenLocal bool `json:"listenLocal,omitempty"` + + // queryEndpoints defines the list of Thanos Query endpoints from which to query metrics. + // + // For Thanos >= v0.11.0, it is recommended to use `queryConfig` instead. + // + // `queryConfig` takes precedence over this field. + // + // +optional + QueryEndpoints []string `json:"queryEndpoints,omitempty"` + + // queryConfig defines the list of Thanos Query endpoints from which to query metrics. + // + // The configuration format is defined at https://thanos.io/tip/components/rule.md/#query-api + // + // It requires Thanos >= v0.11.0. + // + // The operator performs no validation of the configuration. + // + // This field takes precedence over `queryEndpoints`. + // + // +optional + QueryConfig *v1.SecretKeySelector `json:"queryConfig,omitempty"` + + // alertmanagersUrl defines the list of Alertmanager endpoints to send alerts to. + // + // For Thanos >= v0.10.0, it is recommended to use `alertmanagersConfig` instead. + // + // `alertmanagersConfig` takes precedence over this field. + // + // +optional + AlertManagersURL []string `json:"alertmanagersUrl,omitempty"` + // alertmanagersConfig defines the list of Alertmanager endpoints to send alerts to. + // + // The configuration format is defined at https://thanos.io/tip/components/rule.md/#alertmanager. + // + // It requires Thanos >= v0.10.0. + // + // The operator performs no validation of the configuration. + // + // This field takes precedence over `alertmanagersUrl`. + // + // +optional + AlertManagersConfig *v1.SecretKeySelector `json:"alertmanagersConfig,omitempty"` + + // ruleSelector defines the PrometheusRule objects to be selected for rule evaluation. An empty + // label selector matches all objects. 
A null label selector matches no + // objects. + // + // +optional + RuleSelector *metav1.LabelSelector `json:"ruleSelector,omitempty"` + // ruleNamespaceSelector defines the namespaces to be selected for Rules discovery. If unspecified, only + // the same namespace as the ThanosRuler object is in is used. + // + // +optional + RuleNamespaceSelector *metav1.LabelSelector `json:"ruleNamespaceSelector,omitempty"` + + // enforcedNamespaceLabel enforces adding a namespace label of origin for each alert + // and metric that is user created. The label value will always be the namespace of the object that is + // being created. + // +optional + EnforcedNamespaceLabel string `json:"enforcedNamespaceLabel,omitempty"` + // excludedFromEnforcement defines the list of references to PrometheusRule objects + // to be excluded from enforcing a namespace label of origin. + // Applies only if enforcedNamespaceLabel set to true. + // +optional + ExcludedFromEnforcement []ObjectReference `json:"excludedFromEnforcement,omitempty"` + // prometheusRulesExcludedFromEnforce defines a list of Prometheus rules to be excluded from enforcing + // of adding namespace labels. Works only if enforcedNamespaceLabel set to true. + // Make sure both ruleNamespace and ruleName are set for each pair + // Deprecated: use excludedFromEnforcement instead. + // +optional + PrometheusRulesExcludedFromEnforce []PrometheusRuleExcludeConfig `json:"prometheusRulesExcludedFromEnforce,omitempty"` + + // logLevel for ThanosRuler to be configured with. + // +kubebuilder:validation:Enum="";debug;info;warn;error + // +optional + LogLevel string `json:"logLevel,omitempty"` + // logFormat for ThanosRuler to be configured with. + // +kubebuilder:validation:Enum="";logfmt;json + // +optional + LogFormat string `json:"logFormat,omitempty"` + + // portName defines the port name used for the pods and governing service. + // Defaults to `web`. 
+ // +kubebuilder:default:="web" + // +optional + PortName string `json:"portName,omitempty"` + + // evaluationInterval defines the interval between consecutive evaluations. + // +kubebuilder:default:="15s" + // +optional + EvaluationInterval Duration `json:"evaluationInterval,omitempty"` + + // resendDelay defines the minimum amount of time to wait before resending an alert to Alertmanager. + // +optional + ResendDelay *Duration `json:"resendDelay,omitempty"` + + // ruleOutageTolerance defines the max time to tolerate prometheus outage for restoring "for" state of alert. + // It requires Thanos >= v0.30.0. + // +optional + RuleOutageTolerance *Duration `json:"ruleOutageTolerance,omitempty"` + + // ruleQueryOffset defines the default rule group's query offset duration to use. + // It requires Thanos >= v0.38.0. + // +optional + RuleQueryOffset *Duration `json:"ruleQueryOffset,omitempty"` + + // ruleConcurrentEval defines how many rules can be evaluated concurrently. + // It requires Thanos >= v0.37.0. + // +kubebuilder:validation:Minimum=1 + // + // +optional + RuleConcurrentEval *int32 `json:"ruleConcurrentEval,omitempty"` + + // ruleGracePeriod defines the minimum duration between alert and restored "for" state. + // This is maintained only for alerts with configured "for" time greater than grace period. + // It requires Thanos >= v0.30.0. + // + // +optional + RuleGracePeriod *Duration `json:"ruleGracePeriod,omitempty"` + + // retention defines the time duration ThanosRuler shall retain data for. Default is '24h', and + // must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds + // seconds minutes hours days weeks years). + // + // The field has no effect when remote-write is configured since the Ruler + // operates in stateless mode. + // + // +kubebuilder:default:="24h" + // +optional + Retention Duration `json:"retention,omitempty"` + + // containers allows injecting additional containers or modifying operator generated + // containers. 
This can be used to allow adding an authentication proxy to a ThanosRuler pod or + // to change the behavior of an operator generated container. Containers described here modify + // an operator generated container if they share the same name and modifications are done via a + // strategic merge patch. The current container names are: `thanos-ruler` and `config-reloader`. + // Overriding containers is entirely outside the scope of what the maintainers will support and by doing + // so, you accept that this behaviour may break at any time without notice. + // +optional + Containers []v1.Container `json:"containers,omitempty"` + // initContainers allows adding initContainers to the pod definition. Those can be used to e.g. + // fetch secrets for injection into the ThanosRuler configuration from external sources. Any + // errors during the execution of an initContainer will lead to a restart of the Pod. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // Using initContainers for any use case other then secret fetching is entirely outside the scope + // of what the maintainers will support and by doing so, you accept that this behaviour may break + // at any time without notice. + // +optional + InitContainers []v1.Container `json:"initContainers,omitempty"` + + // tracingConfig defines the tracing configuration. + // + // The configuration format is defined at https://thanos.io/tip/thanos/tracing.md/#configuration + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + // The operator performs no validation of the configuration. + // + // `tracingConfigFile` takes precedence over this field. + // + // +optional + TracingConfig *v1.SecretKeySelector `json:"tracingConfig,omitempty"` + // tracingConfigFile defines the path of the tracing configuration file. 
+ // + // The configuration format is defined at https://thanos.io/tip/thanos/tracing.md/#configuration + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + // The operator performs no validation of the configuration file. + // + // This field takes precedence over `tracingConfig`. + // + // +optional + TracingConfigFile string `json:"tracingConfigFile,omitempty"` + + // labels defines the external label pairs of the ThanosRuler resource. + // + // A default replica label `thanos_ruler_replica` will be always added as a + // label with the value of the pod's name. + // + // +optional + Labels map[string]string `json:"labels,omitempty"` + + // alertDropLabels defines the label names which should be dropped in Thanos Ruler + // alerts. + // + // The replica label `thanos_ruler_replica` will always be dropped from the alerts. + // + // +optional + AlertDropLabels []string `json:"alertDropLabels,omitempty"` + + // externalPrefix defines the Thanos Ruler instances will be available under. This is + // necessary to generate correct URLs. This is necessary if Thanos Ruler is not + // served from root of a DNS name. + // +optional + ExternalPrefix string `json:"externalPrefix,omitempty"` + // routePrefix defines the route prefix ThanosRuler registers HTTP handlers for. This allows thanos UI to be served on a sub-path. + // +optional + RoutePrefix string `json:"routePrefix,omitempty"` + + // grpcServerTlsConfig defines the gRPC server from which Thanos Querier reads + // recorded rule data. + // Note: Currently only the CAFile, CertFile, and KeyFile fields are supported. + // Maps to the '--grpc-server-tls-*' CLI args. + // +optional + GRPCServerTLSConfig *TLSConfig `json:"grpcServerTlsConfig,omitempty"` + + // alertQueryUrl defines how Thanos Ruler will set in the 'Source' field + // of all alerts. + // Maps to the '--alert.query-url' CLI arg. 
+ // +optional + AlertQueryURL string `json:"alertQueryUrl,omitempty"` + + // minReadySeconds defines the minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing for it to be considered available. + // + // If unset, pods will be considered available as soon as they are ready. + // + // +kubebuilder:validation:Minimum:=0 + // +optional + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + + // alertRelabelConfigs defines the alert relabeling in Thanos Ruler. + // + // Alert relabel configuration must have the form as specified in the + // official Prometheus documentation: + // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs + // + // The operator performs no validation of the configuration. + // + // `alertRelabelConfigFile` takes precedence over this field. + // + // +optional + AlertRelabelConfigs *v1.SecretKeySelector `json:"alertRelabelConfigs,omitempty"` + // alertRelabelConfigFile defines the path to the alert relabeling configuration file. + // + // Alert relabel configuration must have the form as specified in the + // official Prometheus documentation: + // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs + // + // The operator performs no validation of the configuration file. + // + // This field takes precedence over `alertRelabelConfig`. + // + // +optional + AlertRelabelConfigFile *string `json:"alertRelabelConfigFile,omitempty"` + + // hostAliases defines pods' hostAliases configuration + // +listType=map + // +listMapKey=ip + // +optional + HostAliases []HostAlias `json:"hostAliases,omitempty"` + + // additionalArgs defines how to add additional arguments for the ThanosRuler container. + // It is intended for e.g. activating hidden flags which are not supported by + // the dedicated configuration options yet. 
The arguments are passed as-is to the + // ThanosRuler container which may cause issues if they are invalid or not supported + // by the given ThanosRuler version. + // In case of an argument conflict (e.g. an argument which is already set by the + // operator itself) or when providing an invalid argument the reconciliation will + // fail and an error will be logged. + // +optional + AdditionalArgs []Argument `json:"additionalArgs,omitempty"` + + // web defines the configuration of the ThanosRuler web server. + // +optional + Web *ThanosRulerWebSpec `json:"web,omitempty"` + + // remoteWrite defines the list of remote write configurations. + // + // When the list isn't empty, the ruler is configured with stateless mode. + // + // It requires Thanos >= 0.24.0. + // + // +optional + RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"` + + // terminationGracePeriodSeconds defines the optional duration in seconds the pod needs to terminate gracefully. + // Value must be non-negative integer. The value zero indicates stop immediately via + // the kill signal (no opportunity to shut down) which may lead to data corruption. + // + // Defaults to 120 seconds. + // + // +kubebuilder:validation:Minimum:=0 + // +optional + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` + + // enableFeatures defines how to setup Thanos Ruler feature flags. By default, no features are enabled. + // + // Enabling features which are disabled by default is entirely outside the + // scope of what the maintainers will support and by doing so, you accept + // that this behaviour may break at any time without notice. + // + // For more information see https://thanos.io/tip/components/rule.md/ + // + // It requires Thanos >= 0.39.0. + // +listType:=set + // +optional + EnableFeatures []EnableFeature `json:"enableFeatures,omitempty"` + + // hostUsers supports the user space in Kubernetes. 
+ // + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/user-namespaces/ + // + // + // The feature requires at least Kubernetes 1.28 with the `UserNamespacesSupport` feature gate enabled. + // Starting Kubernetes 1.33, the feature is enabled by default. + // + // +optional + HostUsers *bool `json:"hostUsers,omitempty"` +} + +// ThanosRulerWebSpec defines the configuration of the ThanosRuler web server. +// +k8s:openapi-gen=true +type ThanosRulerWebSpec struct { + // +optional + WebConfigFileFields `json:",inline"` +} + +// ThanosRulerStatus is the most recent observed status of the ThanosRuler. Read-only. +// More info: +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +// +k8s:openapi-gen=true +type ThanosRulerStatus struct { + // paused defines whether any actions on the underlying managed objects are + // being performed. Only delete actions will be performed. + // +optional + Paused bool `json:"paused"` + // replicas defines the total number of non-terminated pods targeted by this ThanosRuler deployment + // (their labels match the selector). + // +optional + Replicas int32 `json:"replicas"` + // updatedReplicas defines the total number of non-terminated pods targeted by this ThanosRuler deployment + // that have the desired version spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas"` + // availableReplicas defines the total number of available pods (ready for at least minReadySeconds) + // targeted by this ThanosRuler deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas"` + // unavailableReplicas defines the total number of unavailable pods targeted by this ThanosRuler deployment. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas"` + // conditions defines the current state of the ThanosRuler object. 
+ // +listType=map + // +listMapKey=type + // +optional + Conditions []Condition `json:"conditions,omitempty"` +} + +func (tr *ThanosRuler) ExpectedReplicas() int { + if tr.Spec.Replicas == nil { + return 1 + } + return int(*tr.Spec.Replicas) +} + +func (tr *ThanosRuler) SetReplicas(i int) { tr.Status.Replicas = int32(i) } +func (tr *ThanosRuler) SetUpdatedReplicas(i int) { tr.Status.UpdatedReplicas = int32(i) } +func (tr *ThanosRuler) SetAvailableReplicas(i int) { tr.Status.AvailableReplicas = int32(i) } +func (tr *ThanosRuler) SetUnavailableReplicas(i int) { tr.Status.UnavailableReplicas = int32(i) } + +// DeepCopyObject implements the runtime.Object interface. +func (l *ThanosRuler) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *ThanosRulerList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go new file mode 100644 index 0000000000..eb110df7ef --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go @@ -0,0 +1,1127 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + "errors" + "fmt" + "net/url" + "reflect" + "strings" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" +) + +const ( + Version = "v1" +) + +// ByteSize is a valid memory size type based on powers-of-2, so 1KB is 1024B. +// Supported units: B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB, EB, EiB Ex: `512MB`. +// +kubebuilder:validation:Pattern:="(^0|([0-9]*[.])?[0-9]+((K|M|G|T|E|P)i?)?B)$" +type ByteSize string + +func (bs *ByteSize) IsEmpty() bool { + return bs == nil || *bs == "" +} + +// Duration is a valid time duration that can be parsed by Prometheus model.ParseDuration() function. +// Supported units: y, w, d, h, m, s, ms +// Examples: `30s`, `1m`, `1h20m15s`, `15d` +// +kubebuilder:validation:Pattern:="^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$" +type Duration string + +// DurationPointer is a helper function to parse a Duration string into a *Duration. +func DurationPointer(s string) *Duration { + d := Duration(s) + return &d +} + +// NonEmptyDuration is a valid time duration that can be parsed by Prometheus model.ParseDuration() function. +// Compared to Duration, NonEmptyDuration enforces a minimum length of 1. +// Supported units: y, w, d, h, m, s, ms +// Examples: `30s`, `1m`, `1h20m15s`, `15d` +// +kubebuilder:validation:Pattern:="^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$" +// +kubebuilder:validation:MinLength=1 +type NonEmptyDuration string + +// GoDuration is a valid time duration that can be parsed by Go's time.ParseDuration() function. 
+// Supported units: h, m, s, ms +// Examples: `45ms`, `30s`, `1m`, `1h20m15s` +// +kubebuilder:validation:Pattern:="^(0|(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$" +type GoDuration string + +// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the +// pod's hosts file. +type HostAlias struct { + // ip defines the IP address of the host file entry. + // +required + IP string `json:"ip"` + // hostnames defines hostnames for the above IP address. + // +required + Hostnames []string `json:"hostnames"` +} + +// PrometheusRuleExcludeConfig enables users to configure excluded +// PrometheusRule names and their namespaces to be ignored while enforcing +// namespace label for alerts and metrics. +type PrometheusRuleExcludeConfig struct { + // ruleNamespace defines the namespace of the excluded PrometheusRule object. + // +required + RuleNamespace string `json:"ruleNamespace"` + // ruleName defines the name of the excluded PrometheusRule object. + // +required + RuleName string `json:"ruleName"` +} + +type ProxyConfig struct { + // proxyUrl defines the HTTP proxy server to use. + // + // +kubebuilder:validation:Pattern:="^(http|https|socks5)://.+$" + // +optional + ProxyURL *string `json:"proxyUrl,omitempty"` + // noProxy defines a comma-separated string that can contain IPs, CIDR notation, domain names + // that should be excluded from proxying. IP and domain names can + // contain port numbers. + // + // It requires Prometheus >= v2.43.0, Alertmanager >= v0.25.0 or Thanos >= v0.32.0. + // +optional + NoProxy *string `json:"noProxy,omitempty"` + // proxyFromEnvironment defines whether to use the proxy configuration defined by environment variables (HTTP_PROXY, HTTPS_PROXY, and NO_PROXY). + // + // It requires Prometheus >= v2.43.0, Alertmanager >= v0.25.0 or Thanos >= v0.32.0. 
+ // +optional + ProxyFromEnvironment *bool `json:"proxyFromEnvironment,omitempty"` + // proxyConnectHeader optionally specifies headers to send to + // proxies during CONNECT requests. + // + // It requires Prometheus >= v2.43.0, Alertmanager >= v0.25.0 or Thanos >= v0.32.0. + // +optional + // +mapType:=atomic + ProxyConnectHeader map[string][]v1.SecretKeySelector `json:"proxyConnectHeader,omitempty"` +} + +// Validate semantically validates the given ProxyConfig. +func (pc *ProxyConfig) Validate() error { + if pc == nil { + return nil + } + + if reflect.ValueOf(pc).IsZero() { + return nil + } + + proxyFromEnvironmentDefined := pc.ProxyFromEnvironment != nil && *pc.ProxyFromEnvironment + proxyURLDefined := pc.ProxyURL != nil && *pc.ProxyURL != "" + noProxyDefined := pc.NoProxy != nil && *pc.NoProxy != "" + + if len(pc.ProxyConnectHeader) > 0 && (!proxyFromEnvironmentDefined && !proxyURLDefined) { + return fmt.Errorf("if proxyConnectHeader is configured, proxyUrl or proxyFromEnvironment must also be configured") + } + + if proxyFromEnvironmentDefined && proxyURLDefined { + return fmt.Errorf("if proxyFromEnvironment is configured, proxyUrl must not be configured") + } + + if proxyFromEnvironmentDefined && noProxyDefined { + return fmt.Errorf("if proxyFromEnvironment is configured, noProxy must not be configured") + } + + if !proxyURLDefined && noProxyDefined { + return fmt.Errorf("if noProxy is configured, proxyUrl must also be configured") + } + + for k, v := range pc.ProxyConnectHeader { + if len(v) == 0 { + return fmt.Errorf("proxyConnetHeader[%s]: selector must not be empty", k) + } + for i, sel := range v { + if sel == (v1.SecretKeySelector{}) { + return fmt.Errorf("proxyConnectHeader[%s][%d]: selector must be defined", k, i) + } + } + } + + if pc.ProxyURL != nil { + if _, err := url.Parse(*pc.ProxyURL); err != nil { + return err + } + } + return nil +} + +// ObjectReference references a PodMonitor, ServiceMonitor, Probe or PrometheusRule object. 
+type ObjectReference struct { + // group of the referent. When not specified, it defaults to `monitoring.coreos.com` + // +optional + // +kubebuilder:default:="monitoring.coreos.com" + // +kubebuilder:validation:Enum=monitoring.coreos.com + Group string `json:"group"` + // resource of the referent. + // +required + // +kubebuilder:validation:Enum=prometheusrules;servicemonitors;podmonitors;probes;scrapeconfigs + Resource string `json:"resource"` + // namespace of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + // +required + // +kubebuilder:validation:MinLength=1 + Namespace string `json:"namespace"` + // name of the referent. When not set, all resources in the namespace are matched. + // +optional + Name string `json:"name,omitempty"` +} + +func (obj *ObjectReference) GroupResource() schema.GroupResource { + return schema.GroupResource{ + Resource: obj.Resource, + Group: obj.getGroup(), + } +} + +func (obj *ObjectReference) GroupKind() schema.GroupKind { + return schema.GroupKind{ + Kind: monitoring.ResourceToKind(obj.Resource), + Group: obj.getGroup(), + } +} + +// getGroup returns the group of the object. +// It is mostly needed for tests which don't create objects through the API and don't benefit from the default value. +func (obj *ObjectReference) getGroup() string { + if obj.Group == "" { + return monitoring.GroupName + } + return obj.Group +} + +// ArbitraryFSAccessThroughSMsConfig enables users to configure, whether +// a service monitor selected by the Prometheus instance is allowed to use +// arbitrary files on the file system of the Prometheus container. This is the case +// when e.g. a service monitor specifies a BearerTokenFile in an endpoint. A +// malicious user could create a service monitor selecting arbitrary secret files +// in the Prometheus container. Those secrets would then be sent with a scrape +// request by Prometheus to a malicious target. 
Denying the above would prevent the +// attack, users can instead use the BearerTokenSecret field. +type ArbitraryFSAccessThroughSMsConfig struct { + // deny prevents service monitors from accessing arbitrary files on the file system. + // When true, service monitors cannot use file-based configurations like BearerTokenFile + // that could potentially access sensitive files. When false (default), such access is allowed. + // Setting this to true enhances security by preventing potential credential theft attacks. + // + // +optional + Deny bool `json:"deny,omitempty"` +} + +// Condition represents the state of the resources associated with the +// Prometheus, Alertmanager or ThanosRuler resource. +// +k8s:deepcopy-gen=true +type Condition struct { + // type of the condition being reported. + // +required + Type ConditionType `json:"type"` + // status of the condition. + // +required + Status ConditionStatus `json:"status"` + // lastTransitionTime is the time of the last update to the current status property. + // +required + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + // reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // message defines human-readable message indicating details for the condition's last transition. + // +optional + Message string `json:"message,omitempty"` + // observedGeneration defines the .metadata.generation that the + // condition was set based upon. For instance, if `.metadata.generation` is + // currently 12, but the `.status.conditions[].observedGeneration` is 9, the + // condition is out of date with respect to the current state of the + // instance. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// +kubebuilder:validation:MinLength=1 +type ConditionType string + +const ( + // Available indicates whether enough pods are ready to provide the + // service. 
+ // The possible status values for this condition type are: + // - True: all pods are running and ready, the service is fully available. + // - Degraded: some pods aren't ready, the service is partially available. + // - False: no pods are running, the service is totally unavailable. + // - Unknown: the operator couldn't determine the condition status. + Available ConditionType = "Available" + // Reconciled indicates whether the operator has reconciled the state of + // the underlying resources with the object's spec. + // The possible status values for this condition type are: + // - True: the reconciliation was successful. + // - False: the reconciliation failed. + // - Unknown: the operator couldn't determine the condition status. + Reconciled ConditionType = "Reconciled" + // Accepted indicates whether the workload controller has successfully accepted + // the configuration resource and updated the configuration of the workload accordingly. + // The possible status values for this condition type are: + // - True: the configuration resource was successfully accepted by the controller and written to the configuration secret. + // - False: the controller rejected the configuration due to an error. + // - Unknown: the operator couldn't determine the condition status. + Accepted ConditionType = "Accepted" +) + +// +kubebuilder:validation:MinLength=1 +type ConditionStatus string + +const ( + ConditionTrue ConditionStatus = "True" + ConditionDegraded ConditionStatus = "Degraded" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// EmbeddedPersistentVolumeClaim is an embedded version of k8s.io/api/core/v1.PersistentVolumeClaim. +// It contains TypeMeta and a reduced ObjectMeta. +type EmbeddedPersistentVolumeClaim struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines EmbeddedMetadata contains metadata relevant to an EmbeddedResource. 
+ // +optional + EmbeddedObjectMetadata `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec defines the specification of the characteristics of a volume requested by a pod author. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + Spec v1.PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // status is deprecated: this field is never set. + // +optional + Status v1.PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// EmbeddedObjectMetadata contains a subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta +// Only fields which are relevant to embedded resources are included. +type EmbeddedObjectMetadata struct { + // name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/ + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // labels define the map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` + + // annotations defines an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. 
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` +} + +// WebConfigFileFields defines the file content for --web.config.file flag. +// +k8s:deepcopy-gen=true +type WebConfigFileFields struct { + // tlsConfig defines the TLS parameters for HTTPS. + // +optional + TLSConfig *WebTLSConfig `json:"tlsConfig,omitempty"` + // httpConfig defines HTTP parameters for web server. + // +optional + HTTPConfig *WebHTTPConfig `json:"httpConfig,omitempty"` +} + +// WebHTTPConfig defines HTTP parameters for web server. +// +k8s:openapi-gen=true +type WebHTTPConfig struct { + // http2 enable HTTP/2 support. Note that HTTP/2 is only supported with TLS. + // When TLSConfig is not configured, HTTP/2 will be disabled. + // Whenever the value of the field changes, a rolling update will be triggered. + // +optional + HTTP2 *bool `json:"http2,omitempty"` + // headers defines a list of headers that can be added to HTTP responses. + // +optional + Headers *WebHTTPHeaders `json:"headers,omitempty"` +} + +// WebHTTPHeaders defines the list of headers that can be added to HTTP responses. +// +k8s:openapi-gen=true +type WebHTTPHeaders struct { + // contentSecurityPolicy defines the Content-Security-Policy header to HTTP responses. + // Unset if blank. + // +optional + ContentSecurityPolicy string `json:"contentSecurityPolicy,omitempty"` + // xFrameOptions defines the X-Frame-Options header to HTTP responses. + // Unset if blank. Accepted values are deny and sameorigin. + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options + // +kubebuilder:validation:Enum="";Deny;SameOrigin + // +optional + XFrameOptions string `json:"xFrameOptions,omitempty"` + // xContentTypeOptions defines the X-Content-Type-Options header to HTTP responses. + // Unset if blank. Accepted value is nosniff. 
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options + // +kubebuilder:validation:Enum="";NoSniff + // +optional + XContentTypeOptions string `json:"xContentTypeOptions,omitempty"` + // xXSSProtection defines the X-XSS-Protection header to all responses. + // Unset if blank. + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection + // +optional + XXSSProtection string `json:"xXSSProtection,omitempty"` + // strictTransportSecurity defines the Strict-Transport-Security header to HTTP responses. + // Unset if blank. + // Please make sure that you use this with care as this header might force + // browsers to load Prometheus and the other applications hosted on the same + // domain and subdomains over HTTPS. + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security + // +optional + StrictTransportSecurity string `json:"strictTransportSecurity,omitempty"` +} + +// WebTLSConfig defines the TLS parameters for HTTPS. +// +k8s:openapi-gen=true +type WebTLSConfig struct { + // cert defines the Secret or ConfigMap containing the TLS certificate for the web server. + // + // Either `keySecret` or `keyFile` must be defined. + // + // It is mutually exclusive with `certFile`. + // + // +optional + Cert SecretOrConfigMap `json:"cert,omitempty"` + // certFile defines the path to the TLS certificate file in the container for the web server. + // + // Either `keySecret` or `keyFile` must be defined. + // + // It is mutually exclusive with `cert`. + // + // +optional + CertFile *string `json:"certFile,omitempty"` + + // keySecret defines the secret containing the TLS private key for the web server. + // + // Either `cert` or `certFile` must be defined. + // + // It is mutually exclusive with `keyFile`. + // + // +optional + KeySecret v1.SecretKeySelector `json:"keySecret,omitempty"` + // keyFile defines the path to the TLS private key file in the container for the web server. 
+ // + // If defined, either `cert` or `certFile` must be defined. + // + // It is mutually exclusive with `keySecret`. + // + // +optional + KeyFile *string `json:"keyFile,omitempty"` + + // client_ca defines the Secret or ConfigMap containing the CA certificate for client certificate + // authentication to the server. + // + // It is mutually exclusive with `clientCAFile`. + // + // +optional + //nolint:kubeapilinter // The json tag doesn't meet the conventions to be compatible with Prometheus format. + ClientCA SecretOrConfigMap `json:"client_ca,omitempty"` + // clientCAFile defines the path to the CA certificate file for client certificate authentication to + // the server. + // + // It is mutually exclusive with `client_ca`. + // + // +optional + ClientCAFile *string `json:"clientCAFile,omitempty"` + // clientAuthType defines the server policy for client TLS authentication. + // + // For more detail on clientAuth options: + // https://golang.org/pkg/crypto/tls/#ClientAuthType + // + // +optional + ClientAuthType *string `json:"clientAuthType,omitempty"` + + // minVersion defines the minimum TLS version that is acceptable. + // + // +optional + MinVersion *string `json:"minVersion,omitempty"` + // maxVersion defines the Maximum TLS version that is acceptable. + // + // +optional + MaxVersion *string `json:"maxVersion,omitempty"` + + // cipherSuites defines the list of supported cipher suites for TLS versions up to TLS 1.2. + // + // If not defined, the Go default cipher suites are used. + // Available cipher suites are documented in the Go documentation: + // https://golang.org/pkg/crypto/tls/#pkg-constants + // + // +optional + CipherSuites []string `json:"cipherSuites,omitempty"` + + // preferServerCipherSuites defines whether the server selects the client's most preferred cipher + // suite, or the server's most preferred cipher suite. + // + // If true then the server's preference, as expressed in + // the order of elements in cipherSuites, is used. 
+ // + // +optional + PreferServerCipherSuites *bool `json:"preferServerCipherSuites,omitempty"` + + // curvePreferences defines elliptic curves that will be used in an ECDHE handshake, in preference + // order. + // + // Available curves are documented in the Go documentation: + // https://golang.org/pkg/crypto/tls/#CurveID + // + // +optional + CurvePreferences []string `json:"curvePreferences,omitempty"` +} + +// Validate returns an error if one of the WebTLSConfig fields is invalid. +// A valid WebTLSConfig should have (Cert or CertFile) and (KeySecret or KeyFile) fields which are not +// zero values. +func (c *WebTLSConfig) Validate() error { + if c == nil { + return nil + } + + if c.ClientCA != (SecretOrConfigMap{}) { + if c.ClientCAFile != nil && *c.ClientCAFile != "" { + return errors.New("cannot specify both clientCAFile and clientCA") + } + + if err := c.ClientCA.Validate(); err != nil { + return fmt.Errorf("invalid client CA: %w", err) + } + } + + if c.Cert != (SecretOrConfigMap{}) { + if c.CertFile != nil && *c.CertFile != "" { + return errors.New("cannot specify both cert and certFile") + } + if err := c.Cert.Validate(); err != nil { + return fmt.Errorf("invalid TLS certificate: %w", err) + } + } + + if c.KeyFile != nil && *c.KeyFile != "" && c.KeySecret != (v1.SecretKeySelector{}) { + return errors.New("cannot specify both keyFile and keySecret") + } + + if (c.KeyFile == nil || *c.KeyFile == "") && c.KeySecret == (v1.SecretKeySelector{}) { + return errors.New("TLS private key must be defined") + } + + if (c.CertFile == nil || *c.CertFile == "") && c.Cert == (SecretOrConfigMap{}) { + return errors.New("TLS certificate must be defined") + } + + return nil +} + +// LabelName is a valid Prometheus label name. +// For Prometheus 3.x, a label name is valid if it contains UTF-8 characters. +// For Prometheus 2.x, a label name is only valid if it contains ASCII characters, letters, numbers, as well as underscores. 
+type LabelName string + +// Endpoint defines an endpoint serving Prometheus metrics to be scraped by +// Prometheus. +// +// +k8s:openapi-gen=true +type Endpoint struct { + // port defines the name of the Service port which this endpoint refers to. + // + // It takes precedence over `targetPort`. + // +optional + Port string `json:"port,omitempty"` + + // targetPort defines the name or number of the target port of the `Pod` object behind the + // Service. The port must be specified with the container's port property. + // + // +optional + TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` + + // path defines the HTTP path from which to scrape for metrics. + // + // If empty, Prometheus uses the default value (e.g. `/metrics`). + // +optional + Path string `json:"path,omitempty"` + + // scheme defines the HTTP scheme to use for scraping. + // + // `http` and `https` are the expected values unless you rewrite the + // `__scheme__` label via relabeling. + // + // If empty, Prometheus uses the default value `http`. + // + // +kubebuilder:validation:Enum=http;https + // +optional + Scheme string `json:"scheme,omitempty"` + + // params define optional HTTP URL parameters. + // +optional + Params map[string][]string `json:"params,omitempty"` + + // interval at which Prometheus scrapes the metrics from the target. + // + // If empty, Prometheus uses the global scrape interval. + // +optional + Interval Duration `json:"interval,omitempty"` + + // scrapeTimeout defines the timeout after which Prometheus considers the scrape to be failed. + // + // If empty, Prometheus uses the global scrape timeout unless it is less + // than the target's scrape interval value in which the latter is used. + // The value cannot be greater than the scrape interval otherwise the operator will reject the resource. + // +optional + ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` + + // tlsConfig defines the TLS configuration to use when scraping the target. 
+ // + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` + + // bearerTokenFile defines the file to read bearer token for scraping the target. + // + // Deprecated: use `authorization` instead. + // +optional + BearerTokenFile string `json:"bearerTokenFile,omitempty"` + + // bearerTokenSecret defines a key of a Secret containing the bearer + // token for scraping targets. The secret needs to be in the same namespace + // as the ServiceMonitor object and readable by the Prometheus Operator. + // + // +optional + // + // Deprecated: use `authorization` instead. + BearerTokenSecret *v1.SecretKeySelector `json:"bearerTokenSecret,omitempty"` + + // authorization configures the Authorization header credentials to use when + // scraping the target. + // + // Cannot be set at the same time as `basicAuth`, or `oauth2`. + // + // +optional + Authorization *SafeAuthorization `json:"authorization,omitempty"` + + // honorLabels defines when true the metric's labels when they collide + // with the target's labels. + // +optional + HonorLabels bool `json:"honorLabels,omitempty"` + + // honorTimestamps defines whether Prometheus preserves the timestamps + // when exposed by the target. + // + // +optional + HonorTimestamps *bool `json:"honorTimestamps,omitempty"` + + // trackTimestampsStaleness defines whether Prometheus tracks staleness of + // the metrics that have an explicit timestamp present in scraped data. + // Has no effect if `honorTimestamps` is false. + // + // It requires Prometheus >= v2.48.0. + // + // +optional + TrackTimestampsStaleness *bool `json:"trackTimestampsStaleness,omitempty"` + + // basicAuth defines the Basic Authentication credentials to use when + // scraping the target. + // + // Cannot be set at the same time as `authorization`, or `oauth2`. + // + // +optional + BasicAuth *BasicAuth `json:"basicAuth,omitempty"` + + // oauth2 defines the OAuth2 settings to use when scraping the target. + // + // It requires Prometheus >= 2.27.0. 
+ // + // Cannot be set at the same time as `authorization`, or `basicAuth`. + // + // +optional + OAuth2 *OAuth2 `json:"oauth2,omitempty"` + + // metricRelabelings defines the relabeling rules to apply to the + // samples before ingestion. + // + // +optional + MetricRelabelConfigs []RelabelConfig `json:"metricRelabelings,omitempty"` + + // relabelings defines the relabeling rules to apply the target's + // metadata labels. + // + // The Operator automatically adds relabelings for a few standard Kubernetes fields. + // + // The original scrape job's name is available via the `__tmp_prometheus_job_name` label. + // + // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + // + // +optional + RelabelConfigs []RelabelConfig `json:"relabelings,omitempty"` + + // +optional + ProxyConfig `json:",inline"` + + // followRedirects defines whether the scrape requests should follow HTTP + // 3xx redirects. + // + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` + + // enableHttp2 can be used to disable HTTP2 when scraping the target. + // + // +optional + EnableHttp2 *bool `json:"enableHttp2,omitempty"` + + // filterRunning when true, the pods which are not running (e.g. either in Failed or + // Succeeded state) are dropped during the target discovery. + // + // If unset, the filtering is enabled. + // + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase + // + // +optional + FilterRunning *bool `json:"filterRunning,omitempty"` +} + +type AttachMetadata struct { + // node when set to true, Prometheus attaches node metadata to the discovered + // targets. + // + // The Prometheus service account must have the `list` and `watch` + // permissions on the `Nodes` objects. + // + // +optional + Node *bool `json:"node,omitempty"` +} + +// OAuth2 configures OAuth2 settings. 
+// +// +k8s:openapi-gen=true +type OAuth2 struct { + // clientId defines a key of a Secret or ConfigMap containing the + // OAuth2 client's ID. + // +required + ClientID SecretOrConfigMap `json:"clientId"` + + // clientSecret defines a key of a Secret containing the OAuth2 + // client's secret. + // +required + ClientSecret v1.SecretKeySelector `json:"clientSecret"` + + // tokenUrl defines the URL to fetch the token from. + // + // +kubebuilder:validation:MinLength=1 + // +required + TokenURL string `json:"tokenUrl"` + + // scopes defines the OAuth2 scopes used for the token request. + // + // +optional. + Scopes []string `json:"scopes,omitempty"` + + // endpointParams configures the HTTP parameters to append to the token + // URL. + // + // +optional + EndpointParams map[string]string `json:"endpointParams,omitempty"` + + // tlsConfig defines the TLS configuration to use when connecting to the OAuth2 server. + // It requires Prometheus >= v2.43.0. + // + // +optional + TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` + + // Proxy configuration to use when connecting to the OAuth2 server. + // It requires Prometheus >= v2.43.0. + // + // +optional + ProxyConfig `json:",inline"` +} + +func (o *OAuth2) Validate() error { + if o == nil { + return nil + } + + if o.TokenURL == "" { + return errors.New("OAuth2 tokenURL must be specified") + } + + if o.ClientID == (SecretOrConfigMap{}) { + return errors.New("OAuth2 clientID must be specified") + } + + if err := o.ClientID.Validate(); err != nil { + return fmt.Errorf("invalid OAuth2 clientID: %w", err) + } + + if err := o.TLSConfig.Validate(); err != nil { + return fmt.Errorf("invalid OAuth2 tlsConfig: %w", err) + } + + return nil +} + +// BasicAuth configures HTTP Basic Authentication settings. +// +// +k8s:openapi-gen=true +type BasicAuth struct { + // username defines a key of a Secret containing the username for + // authentication. 
+ // +optional + Username v1.SecretKeySelector `json:"username,omitempty"` + + // password defines a key of a Secret containing the password for + // authentication. + // +optional + Password v1.SecretKeySelector `json:"password,omitempty"` +} + +// SecretOrConfigMap allows to specify data as a Secret or ConfigMap. Fields are mutually exclusive. +type SecretOrConfigMap struct { + // secret defines the Secret containing data to use for the targets. + // +optional + Secret *v1.SecretKeySelector `json:"secret,omitempty"` + // configMap defines the ConfigMap containing data to use for the targets. + // +optional + ConfigMap *v1.ConfigMapKeySelector `json:"configMap,omitempty"` +} + +// Validate semantically validates the given SecretOrConfigMap. +func (c *SecretOrConfigMap) Validate() error { + if c == nil { + return nil + } + + if c.Secret != nil && c.ConfigMap != nil { + return fmt.Errorf("cannot specify both Secret and ConfigMap") + } + + return nil +} + +func (c *SecretOrConfigMap) String() string { + if c == nil { + return "<nil>" + } + + switch { + case c.Secret != nil: + return fmt.Sprintf("<secret=%s/%s>", c.Secret.LocalObjectReference.Name, c.Secret.Key) + case c.ConfigMap != nil: + return fmt.Sprintf("<configmap=%s/%s>", c.ConfigMap.LocalObjectReference.Name, c.ConfigMap.Key) + } + + return "<empty>" +} + +// +kubebuilder:validation:Enum=TLS10;TLS11;TLS12;TLS13 +type TLSVersion string + +const ( + TLSVersion10 TLSVersion = "TLS10" + TLSVersion11 TLSVersion = "TLS11" + TLSVersion12 TLSVersion = "TLS12" + TLSVersion13 TLSVersion = "TLS13" +) + +// SafeTLSConfig specifies safe TLS configuration parameters. +// +k8s:openapi-gen=true +type SafeTLSConfig struct { + // ca defines the Certificate authority used when verifying server certificates. + // +optional + CA SecretOrConfigMap `json:"ca,omitempty"` + + // cert defines the Client certificate to present when doing client-authentication.
+ // +optional + Cert SecretOrConfigMap `json:"cert,omitempty"` + + // keySecret defines the Secret containing the client key file for the targets. + // +optional + KeySecret *v1.SecretKeySelector `json:"keySecret,omitempty"` + + // serverName is used to verify the hostname for the targets. + // +optional + ServerName *string `json:"serverName,omitempty"` + + // insecureSkipVerify defines how to disable target certificate validation. + // +optional + InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty"` + + // minVersion defines the minimum acceptable TLS version. + // + // It requires Prometheus >= v2.35.0 or Thanos >= v0.28.0. + // +optional + MinVersion *TLSVersion `json:"minVersion,omitempty"` + + // maxVersion defines the maximum acceptable TLS version. + // + // It requires Prometheus >= v2.41.0 or Thanos >= v0.31.0. + // +optional + MaxVersion *TLSVersion `json:"maxVersion,omitempty"` +} + +// Validate semantically validates the given SafeTLSConfig. +func (c *SafeTLSConfig) Validate() error { + if c == nil { + return nil + } + + if c.CA != (SecretOrConfigMap{}) { + if err := c.CA.Validate(); err != nil { + return fmt.Errorf("ca %s: %w", c.CA.String(), err) + } + } + + if c.Cert != (SecretOrConfigMap{}) { + if err := c.Cert.Validate(); err != nil { + return fmt.Errorf("cert %s: %w", c.Cert.String(), err) + } + } + + if c.Cert != (SecretOrConfigMap{}) && c.KeySecret == nil { + return fmt.Errorf("client cert specified without client key") + } + + if c.KeySecret != nil && c.Cert == (SecretOrConfigMap{}) { + return fmt.Errorf("client key specified without client cert") + } + + if c.MaxVersion != nil && c.MinVersion != nil && strings.Compare(string(*c.MaxVersion), string(*c.MinVersion)) == -1 { + return fmt.Errorf("maxVersion must more than or equal to minVersion") + } + + return nil +} + +// TLSConfig extends the safe TLS configuration with file parameters. 
+// +k8s:openapi-gen=true +type TLSConfig struct { + // +optional + SafeTLSConfig `json:",inline"` + // caFile defines the path to the CA cert in the Prometheus container to use for the targets. + // +optional + CAFile string `json:"caFile,omitempty"` + // certFile defines the path to the client cert file in the Prometheus container for the targets. + // +optional + CertFile string `json:"certFile,omitempty"` + // keyFile defines the path to the client key file in the Prometheus container for the targets. + // +optional + KeyFile string `json:"keyFile,omitempty"` +} + +// Validate semantically validates the given TLSConfig. +func (c *TLSConfig) Validate() error { + if c == nil { + return nil + } + + if c.CA != (SecretOrConfigMap{}) { + if c.CAFile != "" { + return fmt.Errorf("cannot specify both caFile and ca") + } + if err := c.CA.Validate(); err != nil { + return fmt.Errorf("SecretOrConfigMap ca: %w", err) + } + } + + if c.Cert != (SecretOrConfigMap{}) { + if c.CertFile != "" { + return fmt.Errorf("cannot specify both certFile and cert") + } + if err := c.Cert.Validate(); err != nil { + return fmt.Errorf("SecretOrConfigMap cert: %w", err) + } + } + + if c.KeyFile != "" && c.KeySecret != nil { + return fmt.Errorf("cannot specify both keyFile and keySecret") + } + + hasCert := c.CertFile != "" || c.Cert != (SecretOrConfigMap{}) + hasKey := c.KeyFile != "" || c.KeySecret != nil + + if hasCert && !hasKey { + return fmt.Errorf("cannot specify client cert without client key") + } + + if hasKey && !hasCert { + return fmt.Errorf("cannot specify client key without client cert") + } + + if c.MaxVersion != nil && c.MinVersion != nil && strings.Compare(string(*c.MaxVersion), string(*c.MinVersion)) == -1 { + return fmt.Errorf("maxVersion must more than or equal to minVersion") + } + + return nil +} + +// NamespaceSelector is a selector for selecting either all namespaces or a +// list of namespaces. +// If `any` is true, it takes precedence over `matchNames`. 
+// If `matchNames` is empty and `any` is false, it means that the objects are +// selected from the current namespace. +// +k8s:openapi-gen=true +type NamespaceSelector struct { + // any defines the boolean describing whether all namespaces are selected in contrast to a + // list restricting them. + // +optional + Any bool `json:"any,omitempty"` + // matchNames defines the list of namespace names to select from. + // +optional + MatchNames []string `json:"matchNames,omitempty"` + + // TODO(fabxc): this should embed metav1.LabelSelector eventually. + // Currently the selector is only used for namespaces which require more complex + // implementation to support label selections. +} + +// Argument as part of the AdditionalArgs list. +// +k8s:openapi-gen=true +type Argument struct { + // name of the argument, e.g. "scrape.discovery-reload-interval". + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + // value defines the argument value, e.g. 30s. Can be empty for name-only arguments (e.g. --storage.tsdb.no-lockfile) + // +optional + Value string `json:"value,omitempty"` +} + +// The valid options for Role. +const ( + RoleNode = "node" + RolePod = "pod" + RoleService = "service" + RoleEndpoint = "endpoints" + RoleEndpointSlice = "endpointslice" + RoleIngress = "ingress" +) + +// NativeHistogramConfig extends the native histogram configuration settings. +// +k8s:openapi-gen=true +type NativeHistogramConfig struct { + // scrapeClassicHistograms defines whether to scrape a classic histogram that is also exposed as a native histogram. + // It requires Prometheus >= v2.45.0. + // + // Notice: `scrapeClassicHistograms` corresponds to the `always_scrape_classic_histograms` field in the Prometheus configuration. 
+ // + // +optional + ScrapeClassicHistograms *bool `json:"scrapeClassicHistograms,omitempty"` + + // nativeHistogramBucketLimit defines if there are more than this many buckets in a native histogram, + // buckets will be merged to stay within the limit. + // It requires Prometheus >= v2.45.0. + // + // +optional + NativeHistogramBucketLimit *uint64 `json:"nativeHistogramBucketLimit,omitempty"` + + // nativeHistogramMinBucketFactor defines if the growth factor of one bucket to the next is smaller than this, + // buckets will be merged to increase the factor sufficiently. + // It requires Prometheus >= v2.50.0. + // + // +optional + NativeHistogramMinBucketFactor *resource.Quantity `json:"nativeHistogramMinBucketFactor,omitempty"` + + // convertClassicHistogramsToNHCB defines whether to convert all scraped classic histograms into a native histogram with custom buckets. + // It requires Prometheus >= v3.0.0. + // + // +optional + ConvertClassicHistogramsToNHCB *bool `json:"convertClassicHistogramsToNHCB,omitempty"` +} + +// +kubebuilder:validation:Enum=RelabelConfig;RoleSelector +type SelectorMechanism string + +const ( + SelectorMechanismRelabel SelectorMechanism = "RelabelConfig" + SelectorMechanismRole SelectorMechanism = "RoleSelector" +) + +// ConfigResourceStatus is the most recent observed status of the Configuration Resource (ServiceMonitor, PodMonitor, Probes, ScrapeConfig, PrometheusRule or AlertmanagerConfig). Read-only. +// More info: +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +// +k8s:openapi-gen=true +type ConfigResourceStatus struct { + // bindings defines the list of workload resources (Prometheus, PrometheusAgent, ThanosRuler or Alertmanager) which select the configuration resource.
+ // +listType=map + // +listMapKey=group + // +listMapKey=resource + // +listMapKey=name + // +listMapKey=namespace + // +optional + Bindings []WorkloadBinding `json:"bindings,omitempty"` +} + +// WorkloadBinding is a link between a configuration resource and a workload resource. +// +k8s:openapi-gen=true +type WorkloadBinding struct { + // group defines the group of the referenced resource. + // +kubebuilder:validation:Enum=monitoring.coreos.com + // +required + Group string `json:"group"` + // resource defines the type of resource being referenced (e.g. Prometheus, PrometheusAgent, ThanosRuler or Alertmanager). + // +kubebuilder:validation:Enum=prometheuses;prometheusagents;thanosrulers;alertmanagers + // +required + Resource string `json:"resource"` + // name defines the name of the referenced object. + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + // namespace defines the namespace of the referenced object. + // +kubebuilder:validation:MinLength=1 + // +required + Namespace string `json:"namespace"` + // conditions defines the current state of the configuration resource when bound to the referenced Workload object. + // +listType=map + // +listMapKey=type + // +optional + Conditions []ConfigResourceCondition `json:"conditions,omitempty"` +} + +// ConfigResourceCondition describes the status of configuration resources linked to Prometheus, PrometheusAgent, Alertmanager or ThanosRuler. +// +k8s:deepcopy-gen=true +type ConfigResourceCondition struct { + // type of the condition being reported. + // Currently, only "Accepted" is supported. + // +kubebuilder:validation:Enum=Accepted + // +required + Type ConditionType `json:"type"` + // status of the condition. + // +required + Status ConditionStatus `json:"status"` + // lastTransitionTime defines the time of the last update to the current status property. 
+ // +required + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + // reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // message defines the human-readable message indicating details for the condition's last transition. + // +optional + Message string `json:"message,omitempty"` + // observedGeneration defines the .metadata.generation that the + // condition was set based upon. For instance, if `.metadata.generation` is + // currently 12, but the `.status.conditions[].observedGeneration` is 9, the + // condition is out of date with respect to the current state of the object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..eb0acd7412 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go @@ -0,0 +1,4243 @@ +//go:build !ignore_autogenerated + +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1 + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerConfig) DeepCopyInto(out *APIServerConfig) { + *out = *in + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(Authorization) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerConfig. +func (in *APIServerConfig) DeepCopy() *APIServerConfig { + if in == nil { + return nil + } + out := new(APIServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertingSpec) DeepCopyInto(out *AlertingSpec) { + *out = *in + if in.Alertmanagers != nil { + in, out := &in.Alertmanagers, &out.Alertmanagers + *out = make([]AlertmanagerEndpoints, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingSpec. +func (in *AlertingSpec) DeepCopy() *AlertingSpec { + if in == nil { + return nil + } + out := new(AlertingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Alertmanager) DeepCopyInto(out *Alertmanager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Alertmanager. +func (in *Alertmanager) DeepCopy() *Alertmanager { + if in == nil { + return nil + } + out := new(Alertmanager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerConfigMatcherStrategy) DeepCopyInto(out *AlertmanagerConfigMatcherStrategy) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerConfigMatcherStrategy. +func (in *AlertmanagerConfigMatcherStrategy) DeepCopy() *AlertmanagerConfigMatcherStrategy { + if in == nil { + return nil + } + out := new(AlertmanagerConfigMatcherStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerConfiguration) DeepCopyInto(out *AlertmanagerConfiguration) { + *out = *in + if in.Global != nil { + in, out := &in.Global, &out.Global + *out = new(AlertmanagerGlobalConfig) + (*in).DeepCopyInto(*out) + } + if in.Templates != nil { + in, out := &in.Templates, &out.Templates + *out = make([]SecretOrConfigMap, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerConfiguration. +func (in *AlertmanagerConfiguration) DeepCopy() *AlertmanagerConfiguration { + if in == nil { + return nil + } + out := new(AlertmanagerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *AlertmanagerEndpoints) DeepCopyInto(out *AlertmanagerEndpoints) { + *out = *in + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + out.Port = in.Port + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.Sigv4 != nil { + in, out := &in.Sigv4, &out.Sigv4 + *out = new(Sigv4) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.APIVersion != nil { + in, out := &in.APIVersion, &out.APIVersion + *out = new(AlertmanagerAPIVersion) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(Duration) + **out = **in + } + if in.EnableHttp2 != nil { + in, out := &in.EnableHttp2, &out.EnableHttp2 + *out = new(bool) + **out = **in + } + if in.RelabelConfigs != nil { + in, out := &in.RelabelConfigs, &out.RelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AlertRelabelConfigs != nil { + in, out := &in.AlertRelabelConfigs, &out.AlertRelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerEndpoints. +func (in *AlertmanagerEndpoints) DeepCopy() *AlertmanagerEndpoints { + if in == nil { + return nil + } + out := new(AlertmanagerEndpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlertmanagerGlobalConfig) DeepCopyInto(out *AlertmanagerGlobalConfig) { + *out = *in + if in.SMTPConfig != nil { + in, out := &in.SMTPConfig, &out.SMTPConfig + *out = new(GlobalSMTPConfig) + (*in).DeepCopyInto(*out) + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } + if in.SlackAPIURL != nil { + in, out := &in.SlackAPIURL, &out.SlackAPIURL + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.OpsGenieAPIURL != nil { + in, out := &in.OpsGenieAPIURL, &out.OpsGenieAPIURL + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.OpsGenieAPIKey != nil { + in, out := &in.OpsGenieAPIKey, &out.OpsGenieAPIKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.PagerdutyURL != nil { + in, out := &in.PagerdutyURL, &out.PagerdutyURL + *out = new(URL) + **out = **in + } + if in.TelegramConfig != nil { + in, out := &in.TelegramConfig, &out.TelegramConfig + *out = new(GlobalTelegramConfig) + (*in).DeepCopyInto(*out) + } + if in.JiraConfig != nil { + in, out := &in.JiraConfig, &out.JiraConfig + *out = new(GlobalJiraConfig) + (*in).DeepCopyInto(*out) + } + if in.VictorOpsConfig != nil { + in, out := &in.VictorOpsConfig, &out.VictorOpsConfig + *out = new(GlobalVictorOpsConfig) + (*in).DeepCopyInto(*out) + } + if in.RocketChatConfig != nil { + in, out := &in.RocketChatConfig, &out.RocketChatConfig + *out = new(GlobalRocketChatConfig) + (*in).DeepCopyInto(*out) + } + if in.WebexConfig != nil { + in, out := &in.WebexConfig, &out.WebexConfig + *out = new(GlobalWebexConfig) + (*in).DeepCopyInto(*out) + } + if in.WeChatConfig != nil { + in, out := &in.WeChatConfig, &out.WeChatConfig + *out = new(GlobalWeChatConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerGlobalConfig. 
+func (in *AlertmanagerGlobalConfig) DeepCopy() *AlertmanagerGlobalConfig { + if in == nil { + return nil + } + out := new(AlertmanagerGlobalConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerLimitsSpec) DeepCopyInto(out *AlertmanagerLimitsSpec) { + *out = *in + if in.MaxSilences != nil { + in, out := &in.MaxSilences, &out.MaxSilences + *out = new(int32) + **out = **in + } + if in.MaxPerSilenceBytes != nil { + in, out := &in.MaxPerSilenceBytes, &out.MaxPerSilenceBytes + *out = new(ByteSize) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerLimitsSpec. +func (in *AlertmanagerLimitsSpec) DeepCopy() *AlertmanagerLimitsSpec { + if in == nil { + return nil + } + out := new(AlertmanagerLimitsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerList) DeepCopyInto(out *AlertmanagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Alertmanager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerList. +func (in *AlertmanagerList) DeepCopy() *AlertmanagerList { + if in == nil { + return nil + } + out := new(AlertmanagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlertmanagerSpec) DeepCopyInto(out *AlertmanagerSpec) { + *out = *in + if in.PodMetadata != nil { + in, out := &in.PodMetadata, &out.PodMetadata + *out = new(EmbeddedObjectMetadata) + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ConfigMaps != nil { + in, out := &in.ConfigMaps, &out.ConfigMaps + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(StorageSpec) + (*in).DeepCopyInto(*out) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]corev1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PersistentVolumeClaimRetentionPolicy != nil { + in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy + *out = new(appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + 
*out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]corev1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.DNSPolicy != nil { + in, out := &in.DNSPolicy, &out.DNSPolicy + *out = new(DNSPolicy) + **out = **in + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(PodDNSConfig) + (*in).DeepCopyInto(*out) + } + if in.EnableServiceLinks != nil { + in, out := &in.EnableServiceLinks, &out.EnableServiceLinks + *out = new(bool) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalPeers != nil { + in, out := &in.AdditionalPeers, &out.AdditionalPeers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ClusterLabel != nil { + in, out := &in.ClusterLabel, &out.ClusterLabel + *out = new(string) + **out = **in + } + if in.AlertmanagerConfigSelector != nil { + in, out := &in.AlertmanagerConfigSelector, &out.AlertmanagerConfigSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.AlertmanagerConfigNamespaceSelector != nil { + in, out := &in.AlertmanagerConfigNamespaceSelector, &out.AlertmanagerConfigNamespaceSelector + *out = 
new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + out.AlertmanagerConfigMatcherStrategy = in.AlertmanagerConfigMatcherStrategy + if in.MinReadySeconds != nil { + in, out := &in.MinReadySeconds, &out.MinReadySeconds + *out = new(int32) + **out = **in + } + if in.HostAliases != nil { + in, out := &in.HostAliases, &out.HostAliases + *out = make([]HostAlias, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Web != nil { + in, out := &in.Web, &out.Web + *out = new(AlertmanagerWebSpec) + (*in).DeepCopyInto(*out) + } + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = new(AlertmanagerLimitsSpec) + (*in).DeepCopyInto(*out) + } + if in.ClusterTLS != nil { + in, out := &in.ClusterTLS, &out.ClusterTLS + *out = new(ClusterTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.AlertmanagerConfiguration != nil { + in, out := &in.AlertmanagerConfiguration, &out.AlertmanagerConfiguration + *out = new(AlertmanagerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + if in.EnableFeatures != nil { + in, out := &in.EnableFeatures, &out.EnableFeatures + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AdditionalArgs != nil { + in, out := &in.AdditionalArgs, &out.AdditionalArgs + *out = make([]Argument, len(*in)) + copy(*out, *in) + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.HostUsers != nil { + in, out := &in.HostUsers, &out.HostUsers + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerSpec. 
+func (in *AlertmanagerSpec) DeepCopy() *AlertmanagerSpec { + if in == nil { + return nil + } + out := new(AlertmanagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerStatus) DeepCopyInto(out *AlertmanagerStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerStatus. +func (in *AlertmanagerStatus) DeepCopy() *AlertmanagerStatus { + if in == nil { + return nil + } + out := new(AlertmanagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerWebSpec) DeepCopyInto(out *AlertmanagerWebSpec) { + *out = *in + in.WebConfigFileFields.DeepCopyInto(&out.WebConfigFileFields) + if in.GetConcurrency != nil { + in, out := &in.GetConcurrency, &out.GetConcurrency + *out = new(uint32) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(uint32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerWebSpec. +func (in *AlertmanagerWebSpec) DeepCopy() *AlertmanagerWebSpec { + if in == nil { + return nil + } + out := new(AlertmanagerWebSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArbitraryFSAccessThroughSMsConfig) DeepCopyInto(out *ArbitraryFSAccessThroughSMsConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArbitraryFSAccessThroughSMsConfig. 
+func (in *ArbitraryFSAccessThroughSMsConfig) DeepCopy() *ArbitraryFSAccessThroughSMsConfig { + if in == nil { + return nil + } + out := new(ArbitraryFSAccessThroughSMsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Argument) DeepCopyInto(out *Argument) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Argument. +func (in *Argument) DeepCopy() *Argument { + if in == nil { + return nil + } + out := new(Argument) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttachMetadata) DeepCopyInto(out *AttachMetadata) { + *out = *in + if in.Node != nil { + in, out := &in.Node, &out.Node + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachMetadata. +func (in *AttachMetadata) DeepCopy() *AttachMetadata { + if in == nil { + return nil + } + out := new(AttachMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Authorization) DeepCopyInto(out *Authorization) { + *out = *in + in.SafeAuthorization.DeepCopyInto(&out.SafeAuthorization) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authorization. +func (in *Authorization) DeepCopy() *Authorization { + if in == nil { + return nil + } + out := new(Authorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureAD) DeepCopyInto(out *AzureAD) { + *out = *in + if in.Cloud != nil { + in, out := &in.Cloud, &out.Cloud + *out = new(string) + **out = **in + } + if in.ManagedIdentity != nil { + in, out := &in.ManagedIdentity, &out.ManagedIdentity + *out = new(ManagedIdentity) + **out = **in + } + if in.OAuth != nil { + in, out := &in.OAuth, &out.OAuth + *out = new(AzureOAuth) + (*in).DeepCopyInto(*out) + } + if in.SDK != nil { + in, out := &in.SDK, &out.SDK + *out = new(AzureSDK) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureAD. +func (in *AzureAD) DeepCopy() *AzureAD { + if in == nil { + return nil + } + out := new(AzureAD) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureOAuth) DeepCopyInto(out *AzureOAuth) { + *out = *in + in.ClientSecret.DeepCopyInto(&out.ClientSecret) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureOAuth. +func (in *AzureOAuth) DeepCopy() *AzureOAuth { + if in == nil { + return nil + } + out := new(AzureOAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureSDK) DeepCopyInto(out *AzureSDK) { + *out = *in + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureSDK. +func (in *AzureSDK) DeepCopy() *AzureSDK { + if in == nil { + return nil + } + out := new(AzureSDK) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BasicAuth) DeepCopyInto(out *BasicAuth) { + *out = *in + in.Username.DeepCopyInto(&out.Username) + in.Password.DeepCopyInto(&out.Password) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuth. +func (in *BasicAuth) DeepCopy() *BasicAuth { + if in == nil { + return nil + } + out := new(BasicAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTLSConfig) DeepCopyInto(out *ClusterTLSConfig) { + *out = *in + in.ServerTLS.DeepCopyInto(&out.ServerTLS) + in.ClientTLS.DeepCopyInto(&out.ClientTLS) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTLSConfig. +func (in *ClusterTLSConfig) DeepCopy() *ClusterTLSConfig { + if in == nil { + return nil + } + out := new(ClusterTLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommonPrometheusFields) DeepCopyInto(out *CommonPrometheusFields) { + *out = *in + if in.PodMetadata != nil { + in, out := &in.PodMetadata, &out.PodMetadata + *out = new(EmbeddedObjectMetadata) + (*in).DeepCopyInto(*out) + } + if in.ServiceMonitorSelector != nil { + in, out := &in.ServiceMonitorSelector, &out.ServiceMonitorSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ServiceMonitorNamespaceSelector != nil { + in, out := &in.ServiceMonitorNamespaceSelector, &out.ServiceMonitorNamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.PodMonitorSelector != nil { + in, out := &in.PodMonitorSelector, &out.PodMonitorSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.PodMonitorNamespaceSelector != nil { + in, out := &in.PodMonitorNamespaceSelector, &out.PodMonitorNamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ProbeSelector != nil { + in, out := &in.ProbeSelector, &out.ProbeSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ProbeNamespaceSelector != nil { + in, out := &in.ProbeNamespaceSelector, &out.ProbeNamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ScrapeConfigSelector != nil { + in, out := &in.ScrapeConfigSelector, &out.ScrapeConfigSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ScrapeConfigNamespaceSelector != nil { + in, out := &in.ScrapeConfigNamespaceSelector, &out.ScrapeConfigNamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = 
new(int32) + **out = **in + } + if in.Shards != nil { + in, out := &in.Shards, &out.Shards + *out = new(int32) + **out = **in + } + if in.ReplicaExternalLabelName != nil { + in, out := &in.ReplicaExternalLabelName, &out.ReplicaExternalLabelName + *out = new(string) + **out = **in + } + if in.PrometheusExternalLabelName != nil { + in, out := &in.PrometheusExternalLabelName, &out.PrometheusExternalLabelName + *out = new(string) + **out = **in + } + if in.ScrapeProtocols != nil { + in, out := &in.ScrapeProtocols, &out.ScrapeProtocols + *out = make([]ScrapeProtocol, len(*in)) + copy(*out, *in) + } + if in.ExternalLabels != nil { + in, out := &in.ExternalLabels, &out.ExternalLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.EnableOTLPReceiver != nil { + in, out := &in.EnableOTLPReceiver, &out.EnableOTLPReceiver + *out = new(bool) + **out = **in + } + if in.RemoteWriteReceiverMessageVersions != nil { + in, out := &in.RemoteWriteReceiverMessageVersions, &out.RemoteWriteReceiverMessageVersions + *out = make([]RemoteWriteMessageVersion, len(*in)) + copy(*out, *in) + } + if in.EnableFeatures != nil { + in, out := &in.EnableFeatures, &out.EnableFeatures + *out = make([]EnableFeature, len(*in)) + copy(*out, *in) + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(StorageSpec) + (*in).DeepCopyInto(*out) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]corev1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PersistentVolumeClaimRetentionPolicy != nil { + in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy + *out = new(appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy) + 
**out = **in + } + if in.Web != nil { + in, out := &in.Web, &out.Web + *out = new(PrometheusWebSpec) + (*in).DeepCopyInto(*out) + } + in.Resources.DeepCopyInto(&out.Resources) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ConfigMaps != nil { + in, out := &in.ConfigMaps, &out.ConfigMaps + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RemoteWrite != nil { + in, out := &in.RemoteWrite, &out.RemoteWrite + *out = make([]RemoteWriteSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OTLP != nil { + in, out := &in.OTLP, &out.OTLP + *out = new(OTLPConfig) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.DNSPolicy != nil { + in, out := &in.DNSPolicy, &out.DNSPolicy + *out = new(DNSPolicy) + **out = **in + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(PodDNSConfig) + 
(*in).DeepCopyInto(*out) + } + if in.EnableServiceLinks != nil { + in, out := &in.EnableServiceLinks, &out.EnableServiceLinks + *out = new(bool) + **out = **in + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalScrapeConfigs != nil { + in, out := &in.AdditionalScrapeConfigs, &out.AdditionalScrapeConfigs + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.APIServerConfig != nil { + in, out := &in.APIServerConfig, &out.APIServerConfig + *out = new(APIServerConfig) + (*in).DeepCopyInto(*out) + } + out.ArbitraryFSAccessThroughSMs = in.ArbitraryFSAccessThroughSMs + if in.EnforcedSampleLimit != nil { + in, out := &in.EnforcedSampleLimit, &out.EnforcedSampleLimit + *out = new(uint64) + **out = **in + } + if in.EnforcedTargetLimit != nil { + in, out := &in.EnforcedTargetLimit, &out.EnforcedTargetLimit + *out = new(uint64) + **out = **in + } + if in.EnforcedLabelLimit != nil { + in, out := &in.EnforcedLabelLimit, &out.EnforcedLabelLimit + *out = new(uint64) + **out = **in + } + if in.EnforcedLabelNameLengthLimit != nil { + in, out := &in.EnforcedLabelNameLengthLimit, &out.EnforcedLabelNameLengthLimit + *out = new(uint64) + **out = **in + } + if in.EnforcedLabelValueLengthLimit != nil { + in, out := &in.EnforcedLabelValueLengthLimit, &out.EnforcedLabelValueLengthLimit + *out = new(uint64) + **out = **in + } + if in.EnforcedKeepDroppedTargets != nil { + in, out := &in.EnforcedKeepDroppedTargets, &out.EnforcedKeepDroppedTargets + *out = new(uint64) + **out = **in + } + if in.NameValidationScheme != nil { + in, out := &in.NameValidationScheme, &out.NameValidationScheme + *out = 
new(NameValidationSchemeOptions) + **out = **in + } + if in.NameEscapingScheme != nil { + in, out := &in.NameEscapingScheme, &out.NameEscapingScheme + *out = new(NameEscapingSchemeOptions) + **out = **in + } + if in.ConvertClassicHistogramsToNHCB != nil { + in, out := &in.ConvertClassicHistogramsToNHCB, &out.ConvertClassicHistogramsToNHCB + *out = new(bool) + **out = **in + } + if in.ScrapeClassicHistograms != nil { + in, out := &in.ScrapeClassicHistograms, &out.ScrapeClassicHistograms + *out = new(bool) + **out = **in + } + if in.MinReadySeconds != nil { + in, out := &in.MinReadySeconds, &out.MinReadySeconds + *out = new(int32) + **out = **in + } + if in.HostAliases != nil { + in, out := &in.HostAliases, &out.HostAliases + *out = make([]HostAlias, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalArgs != nil { + in, out := &in.AdditionalArgs, &out.AdditionalArgs + *out = make([]Argument, len(*in)) + copy(*out, *in) + } + if in.WALCompression != nil { + in, out := &in.WALCompression, &out.WALCompression + *out = new(bool) + **out = **in + } + if in.ExcludedFromEnforcement != nil { + in, out := &in.ExcludedFromEnforcement, &out.ExcludedFromEnforcement + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.PodTargetLabels != nil { + in, out := &in.PodTargetLabels, &out.PodTargetLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TracingConfig != nil { + in, out := &in.TracingConfig, &out.TracingConfig + *out = new(PrometheusTracingConfig) + (*in).DeepCopyInto(*out) + } + if in.BodySizeLimit != nil { + in, out := &in.BodySizeLimit, &out.BodySizeLimit + *out = new(ByteSize) + **out = **in + } + if in.SampleLimit != nil { + in, out := &in.SampleLimit, &out.SampleLimit + *out = new(uint64) + **out = **in + } + if in.TargetLimit != nil { + in, out := &in.TargetLimit, &out.TargetLimit + *out = new(uint64) + **out = **in + } + if in.LabelLimit != nil { + in, out := &in.LabelLimit, 
&out.LabelLimit + *out = new(uint64) + **out = **in + } + if in.LabelNameLengthLimit != nil { + in, out := &in.LabelNameLengthLimit, &out.LabelNameLengthLimit + *out = new(uint64) + **out = **in + } + if in.LabelValueLengthLimit != nil { + in, out := &in.LabelValueLengthLimit, &out.LabelValueLengthLimit + *out = new(uint64) + **out = **in + } + if in.KeepDroppedTargets != nil { + in, out := &in.KeepDroppedTargets, &out.KeepDroppedTargets + *out = new(uint64) + **out = **in + } + if in.ReloadStrategy != nil { + in, out := &in.ReloadStrategy, &out.ReloadStrategy + *out = new(ReloadStrategyType) + **out = **in + } + if in.MaximumStartupDurationSeconds != nil { + in, out := &in.MaximumStartupDurationSeconds, &out.MaximumStartupDurationSeconds + *out = new(int32) + **out = **in + } + if in.ScrapeClasses != nil { + in, out := &in.ScrapeClasses, &out.ScrapeClasses + *out = make([]ScrapeClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceDiscoveryRole != nil { + in, out := &in.ServiceDiscoveryRole, &out.ServiceDiscoveryRole + *out = new(ServiceDiscoveryRole) + **out = **in + } + if in.TSDB != nil { + in, out := &in.TSDB, &out.TSDB + *out = new(TSDBSpec) + (*in).DeepCopyInto(*out) + } + if in.ScrapeFailureLogFile != nil { + in, out := &in.ScrapeFailureLogFile, &out.ScrapeFailureLogFile + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(RuntimeConfig) + (*in).DeepCopyInto(*out) + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.HostUsers != nil { + in, out := &in.HostUsers, &out.HostUsers + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
CommonPrometheusFields. +func (in *CommonPrometheusFields) DeepCopy() *CommonPrometheusFields { + if in == nil { + return nil + } + out := new(CommonPrometheusFields) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigResourceCondition) DeepCopyInto(out *ConfigResourceCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigResourceCondition. +func (in *ConfigResourceCondition) DeepCopy() *ConfigResourceCondition { + if in == nil { + return nil + } + out := new(ConfigResourceCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigResourceStatus) DeepCopyInto(out *ConfigResourceStatus) { + *out = *in + if in.Bindings != nil { + in, out := &in.Bindings, &out.Bindings + *out = make([]WorkloadBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigResourceStatus. 
+func (in *ConfigResourceStatus) DeepCopy() *ConfigResourceStatus { + if in == nil { + return nil + } + out := new(ConfigResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CoreV1TopologySpreadConstraint) DeepCopyInto(out *CoreV1TopologySpreadConstraint) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.MinDomains != nil { + in, out := &in.MinDomains, &out.MinDomains + *out = new(int32) + **out = **in + } + if in.NodeAffinityPolicy != nil { + in, out := &in.NodeAffinityPolicy, &out.NodeAffinityPolicy + *out = new(corev1.NodeInclusionPolicy) + **out = **in + } + if in.NodeTaintsPolicy != nil { + in, out := &in.NodeTaintsPolicy, &out.NodeTaintsPolicy + *out = new(corev1.NodeInclusionPolicy) + **out = **in + } + if in.MatchLabelKeys != nil { + in, out := &in.MatchLabelKeys, &out.MatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreV1TopologySpreadConstraint. +func (in *CoreV1TopologySpreadConstraint) DeepCopy() *CoreV1TopologySpreadConstraint { + if in == nil { + return nil + } + out := new(CoreV1TopologySpreadConstraint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EmbeddedObjectMetadata) DeepCopyInto(out *EmbeddedObjectMetadata) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedObjectMetadata. +func (in *EmbeddedObjectMetadata) DeepCopy() *EmbeddedObjectMetadata { + if in == nil { + return nil + } + out := new(EmbeddedObjectMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmbeddedPersistentVolumeClaim) DeepCopyInto(out *EmbeddedPersistentVolumeClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.EmbeddedObjectMetadata.DeepCopyInto(&out.EmbeddedObjectMetadata) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedPersistentVolumeClaim. +func (in *EmbeddedPersistentVolumeClaim) DeepCopy() *EmbeddedPersistentVolumeClaim { + if in == nil { + return nil + } + out := new(EmbeddedPersistentVolumeClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + if in.TargetPort != nil { + in, out := &in.TargetPort, &out.TargetPort + *out = new(intstr.IntOrString) + **out = **in + } + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + if in.BearerTokenSecret != nil { + in, out := &in.BearerTokenSecret, &out.BearerTokenSecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.HonorTimestamps != nil { + in, out := &in.HonorTimestamps, &out.HonorTimestamps + *out = new(bool) + **out = **in + } + if in.TrackTimestampsStaleness != nil { + in, out := &in.TrackTimestampsStaleness, &out.TrackTimestampsStaleness + *out = new(bool) + **out = **in + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(OAuth2) + (*in).DeepCopyInto(*out) + } + if in.MetricRelabelConfigs != nil { + in, out := &in.MetricRelabelConfigs, &out.MetricRelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RelabelConfigs != nil { + in, out := &in.RelabelConfigs, &out.RelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := 
&in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHttp2 != nil { + in, out := &in.EnableHttp2, &out.EnableHttp2 + *out = new(bool) + **out = **in + } + if in.FilterRunning != nil { + in, out := &in.FilterRunning, &out.FilterRunning + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Exemplars) DeepCopyInto(out *Exemplars) { + *out = *in + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Exemplars. +func (in *Exemplars) DeepCopy() *Exemplars { + if in == nil { + return nil + } + out := new(Exemplars) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalJiraConfig) DeepCopyInto(out *GlobalJiraConfig) { + *out = *in + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalJiraConfig. +func (in *GlobalJiraConfig) DeepCopy() *GlobalJiraConfig { + if in == nil { + return nil + } + out := new(GlobalJiraConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalRocketChatConfig) DeepCopyInto(out *GlobalRocketChatConfig) { + *out = *in + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.Token != nil { + in, out := &in.Token, &out.Token + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.TokenID != nil { + in, out := &in.TokenID, &out.TokenID + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalRocketChatConfig. +func (in *GlobalRocketChatConfig) DeepCopy() *GlobalRocketChatConfig { + if in == nil { + return nil + } + out := new(GlobalRocketChatConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalSMTPConfig) DeepCopyInto(out *GlobalSMTPConfig) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = new(string) + **out = **in + } + if in.SmartHost != nil { + in, out := &in.SmartHost, &out.SmartHost + *out = new(HostPort) + **out = **in + } + if in.Hello != nil { + in, out := &in.Hello, &out.Hello + *out = new(string) + **out = **in + } + if in.AuthUsername != nil { + in, out := &in.AuthUsername, &out.AuthUsername + *out = new(string) + **out = **in + } + if in.AuthPassword != nil { + in, out := &in.AuthPassword, &out.AuthPassword + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.AuthIdentity != nil { + in, out := &in.AuthIdentity, &out.AuthIdentity + *out = new(string) + **out = **in + } + if in.AuthSecret != nil { + in, out := &in.AuthSecret, &out.AuthSecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.RequireTLS != nil { + in, out := &in.RequireTLS, &out.RequireTLS + *out = new(bool) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(SafeTLSConfig) + 
(*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalSMTPConfig. +func (in *GlobalSMTPConfig) DeepCopy() *GlobalSMTPConfig { + if in == nil { + return nil + } + out := new(GlobalSMTPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalTelegramConfig) DeepCopyInto(out *GlobalTelegramConfig) { + *out = *in + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalTelegramConfig. +func (in *GlobalTelegramConfig) DeepCopy() *GlobalTelegramConfig { + if in == nil { + return nil + } + out := new(GlobalTelegramConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalVictorOpsConfig) DeepCopyInto(out *GlobalVictorOpsConfig) { + *out = *in + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.APIKey != nil { + in, out := &in.APIKey, &out.APIKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalVictorOpsConfig. +func (in *GlobalVictorOpsConfig) DeepCopy() *GlobalVictorOpsConfig { + if in == nil { + return nil + } + out := new(GlobalVictorOpsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalWeChatConfig) DeepCopyInto(out *GlobalWeChatConfig) { + *out = *in + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.APISecret != nil { + in, out := &in.APISecret, &out.APISecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.APICorpID != nil { + in, out := &in.APICorpID, &out.APICorpID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalWeChatConfig. +func (in *GlobalWeChatConfig) DeepCopy() *GlobalWeChatConfig { + if in == nil { + return nil + } + out := new(GlobalWeChatConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalWebexConfig) DeepCopyInto(out *GlobalWebexConfig) { + *out = *in + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalWebexConfig. +func (in *GlobalWebexConfig) DeepCopy() *GlobalWebexConfig { + if in == nil { + return nil + } + out := new(GlobalWebexConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPConfig) DeepCopyInto(out *HTTPConfig) { + *out = *in + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(OAuth2) + (*in).DeepCopyInto(*out) + } + if in.BearerTokenSecret != nil { + in, out := &in.BearerTokenSecret, &out.BearerTokenSecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfig. +func (in *HTTPConfig) DeepCopy() *HTTPConfig { + if in == nil { + return nil + } + out := new(HTTPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostAlias) DeepCopyInto(out *HostAlias) { + *out = *in + if in.Hostnames != nil { + in, out := &in.Hostnames, &out.Hostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAlias. +func (in *HostAlias) DeepCopy() *HostAlias { + if in == nil { + return nil + } + out := new(HostAlias) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostPort) DeepCopyInto(out *HostPort) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPort. +func (in *HostPort) DeepCopy() *HostPort { + if in == nil { + return nil + } + out := new(HostPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedIdentity) DeepCopyInto(out *ManagedIdentity) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedIdentity. +func (in *ManagedIdentity) DeepCopy() *ManagedIdentity { + if in == nil { + return nil + } + out := new(ManagedIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataConfig) DeepCopyInto(out *MetadataConfig) { + *out = *in + if in.MaxSamplesPerSend != nil { + in, out := &in.MaxSamplesPerSend, &out.MaxSamplesPerSend + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataConfig. +func (in *MetadataConfig) DeepCopy() *MetadataConfig { + if in == nil { + return nil + } + out := new(MetadataConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceSelector) DeepCopyInto(out *NamespaceSelector) { + *out = *in + if in.MatchNames != nil { + in, out := &in.MatchNames, &out.MatchNames + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSelector. 
+func (in *NamespaceSelector) DeepCopy() *NamespaceSelector { + if in == nil { + return nil + } + out := new(NamespaceSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NativeHistogramConfig) DeepCopyInto(out *NativeHistogramConfig) { + *out = *in + if in.ScrapeClassicHistograms != nil { + in, out := &in.ScrapeClassicHistograms, &out.ScrapeClassicHistograms + *out = new(bool) + **out = **in + } + if in.NativeHistogramBucketLimit != nil { + in, out := &in.NativeHistogramBucketLimit, &out.NativeHistogramBucketLimit + *out = new(uint64) + **out = **in + } + if in.NativeHistogramMinBucketFactor != nil { + in, out := &in.NativeHistogramMinBucketFactor, &out.NativeHistogramMinBucketFactor + x := (*in).DeepCopy() + *out = &x + } + if in.ConvertClassicHistogramsToNHCB != nil { + in, out := &in.ConvertClassicHistogramsToNHCB, &out.ConvertClassicHistogramsToNHCB + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NativeHistogramConfig. +func (in *NativeHistogramConfig) DeepCopy() *NativeHistogramConfig { + if in == nil { + return nil + } + out := new(NativeHistogramConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OAuth2) DeepCopyInto(out *OAuth2) { + *out = *in + in.ClientID.DeepCopyInto(&out.ClientID) + in.ClientSecret.DeepCopyInto(&out.ClientSecret) + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EndpointParams != nil { + in, out := &in.EndpointParams, &out.EndpointParams + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth2. +func (in *OAuth2) DeepCopy() *OAuth2 { + if in == nil { + return nil + } + out := new(OAuth2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OTLPConfig) DeepCopyInto(out *OTLPConfig) { + *out = *in + if in.PromoteAllResourceAttributes != nil { + in, out := &in.PromoteAllResourceAttributes, &out.PromoteAllResourceAttributes + *out = new(bool) + **out = **in + } + if in.IgnoreResourceAttributes != nil { + in, out := &in.IgnoreResourceAttributes, &out.IgnoreResourceAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PromoteResourceAttributes != nil { + in, out := &in.PromoteResourceAttributes, &out.PromoteResourceAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TranslationStrategy != nil { + in, out := &in.TranslationStrategy, &out.TranslationStrategy + *out = new(TranslationStrategyOption) + **out = **in + } + if in.KeepIdentifyingResourceAttributes != nil { + in, out := &in.KeepIdentifyingResourceAttributes, &out.KeepIdentifyingResourceAttributes + *out = new(bool) + **out = **in + } + if in.ConvertHistogramsToNHCB != nil { + in, out := &in.ConvertHistogramsToNHCB, 
&out.ConvertHistogramsToNHCB + *out = new(bool) + **out = **in + } + if in.PromoteScopeMetadata != nil { + in, out := &in.PromoteScopeMetadata, &out.PromoteScopeMetadata + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OTLPConfig. +func (in *OTLPConfig) DeepCopy() *OTLPConfig { + if in == nil { + return nil + } + out := new(OTLPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) { + *out = *in + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Searches != nil { + in, out := &in.Searches, &out.Searches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]PodDNSConfigOption, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig. +func (in *PodDNSConfig) DeepCopy() *PodDNSConfig { + if in == nil { + return nil + } + out := new(PodDNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption. +func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption { + if in == nil { + return nil + } + out := new(PodDNSConfigOption) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetricsEndpoint) DeepCopyInto(out *PodMetricsEndpoint) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.PortNumber != nil { + in, out := &in.PortNumber, &out.PortNumber + *out = new(int32) + **out = **in + } + if in.TargetPort != nil { + in, out := &in.TargetPort, &out.TargetPort + *out = new(intstr.IntOrString) + **out = **in + } + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.HonorTimestamps != nil { + in, out := &in.HonorTimestamps, &out.HonorTimestamps + *out = new(bool) + **out = **in + } + if in.TrackTimestampsStaleness != nil { + in, out := &in.TrackTimestampsStaleness, &out.TrackTimestampsStaleness + *out = new(bool) + **out = **in + } + if in.MetricRelabelConfigs != nil { + in, out := &in.MetricRelabelConfigs, &out.MetricRelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RelabelConfigs != nil { + in, out := &in.RelabelConfigs, &out.RelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FilterRunning != nil { + in, out := &in.FilterRunning, &out.FilterRunning + *out = new(bool) + **out = **in + } + in.HTTPConfig.DeepCopyInto(&out.HTTPConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetricsEndpoint. +func (in *PodMetricsEndpoint) DeepCopy() *PodMetricsEndpoint { + if in == nil { + return nil + } + out := new(PodMetricsEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMonitor) DeepCopyInto(out *PodMonitor) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMonitor. +func (in *PodMonitor) DeepCopy() *PodMonitor { + if in == nil { + return nil + } + out := new(PodMonitor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMonitorList) DeepCopyInto(out *PodMonitorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodMonitor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMonitorList. +func (in *PodMonitorList) DeepCopy() *PodMonitorList { + if in == nil { + return nil + } + out := new(PodMonitorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodMonitorSpec) DeepCopyInto(out *PodMonitorSpec) { + *out = *in + if in.PodTargetLabels != nil { + in, out := &in.PodTargetLabels, &out.PodTargetLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PodMetricsEndpoints != nil { + in, out := &in.PodMetricsEndpoints, &out.PodMetricsEndpoints + *out = make([]PodMetricsEndpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Selector.DeepCopyInto(&out.Selector) + if in.SelectorMechanism != nil { + in, out := &in.SelectorMechanism, &out.SelectorMechanism + *out = new(SelectorMechanism) + **out = **in + } + in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector) + if in.SampleLimit != nil { + in, out := &in.SampleLimit, &out.SampleLimit + *out = new(uint64) + **out = **in + } + if in.TargetLimit != nil { + in, out := &in.TargetLimit, &out.TargetLimit + *out = new(uint64) + **out = **in + } + if in.ScrapeProtocols != nil { + in, out := &in.ScrapeProtocols, &out.ScrapeProtocols + *out = make([]ScrapeProtocol, len(*in)) + copy(*out, *in) + } + if in.FallbackScrapeProtocol != nil { + in, out := &in.FallbackScrapeProtocol, &out.FallbackScrapeProtocol + *out = new(ScrapeProtocol) + **out = **in + } + if in.LabelLimit != nil { + in, out := &in.LabelLimit, &out.LabelLimit + *out = new(uint64) + **out = **in + } + if in.LabelNameLengthLimit != nil { + in, out := &in.LabelNameLengthLimit, &out.LabelNameLengthLimit + *out = new(uint64) + **out = **in + } + if in.LabelValueLengthLimit != nil { + in, out := &in.LabelValueLengthLimit, &out.LabelValueLengthLimit + *out = new(uint64) + **out = **in + } + in.NativeHistogramConfig.DeepCopyInto(&out.NativeHistogramConfig) + if in.KeepDroppedTargets != nil { + in, out := &in.KeepDroppedTargets, &out.KeepDroppedTargets + *out = new(uint64) + **out = **in + } + if in.AttachMetadata != nil { + in, out := &in.AttachMetadata, &out.AttachMetadata + *out = new(AttachMetadata) + (*in).DeepCopyInto(*out) + } + if in.ScrapeClassName != 
nil { + in, out := &in.ScrapeClassName, &out.ScrapeClassName + *out = new(string) + **out = **in + } + if in.BodySizeLimit != nil { + in, out := &in.BodySizeLimit, &out.BodySizeLimit + *out = new(ByteSize) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMonitorSpec. +func (in *PodMonitorSpec) DeepCopy() *PodMonitorSpec { + if in == nil { + return nil + } + out := new(PodMonitorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Probe) DeepCopyInto(out *Probe) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe. +func (in *Probe) DeepCopy() *Probe { + if in == nil { + return nil + } + out := new(Probe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeList) DeepCopyInto(out *ProbeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Probe, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeList. +func (in *ProbeList) DeepCopy() *ProbeList { + if in == nil { + return nil + } + out := new(ProbeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProbeParam) DeepCopyInto(out *ProbeParam) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeParam. +func (in *ProbeParam) DeepCopy() *ProbeParam { + if in == nil { + return nil + } + out := new(ProbeParam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeSpec) DeepCopyInto(out *ProbeSpec) { + *out = *in + in.ProberSpec.DeepCopyInto(&out.ProberSpec) + in.Targets.DeepCopyInto(&out.Targets) + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + in.BearerTokenSecret.DeepCopyInto(&out.BearerTokenSecret) + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(OAuth2) + (*in).DeepCopyInto(*out) + } + if in.MetricRelabelConfigs != nil { + in, out := &in.MetricRelabelConfigs, &out.MetricRelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.SampleLimit != nil { + in, out := &in.SampleLimit, &out.SampleLimit + *out = new(uint64) + **out = **in + } + if in.TargetLimit != nil { + in, out := &in.TargetLimit, &out.TargetLimit + *out = new(uint64) + **out = **in + } + if in.ScrapeProtocols != nil { + in, out := &in.ScrapeProtocols, &out.ScrapeProtocols + *out = make([]ScrapeProtocol, len(*in)) + copy(*out, *in) + } + if in.FallbackScrapeProtocol != nil { + in, out := &in.FallbackScrapeProtocol, &out.FallbackScrapeProtocol + *out = 
new(ScrapeProtocol) + **out = **in + } + if in.LabelLimit != nil { + in, out := &in.LabelLimit, &out.LabelLimit + *out = new(uint64) + **out = **in + } + if in.LabelNameLengthLimit != nil { + in, out := &in.LabelNameLengthLimit, &out.LabelNameLengthLimit + *out = new(uint64) + **out = **in + } + if in.LabelValueLengthLimit != nil { + in, out := &in.LabelValueLengthLimit, &out.LabelValueLengthLimit + *out = new(uint64) + **out = **in + } + in.NativeHistogramConfig.DeepCopyInto(&out.NativeHistogramConfig) + if in.KeepDroppedTargets != nil { + in, out := &in.KeepDroppedTargets, &out.KeepDroppedTargets + *out = new(uint64) + **out = **in + } + if in.ScrapeClassName != nil { + in, out := &in.ScrapeClassName, &out.ScrapeClassName + *out = new(string) + **out = **in + } + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make([]ProbeParam, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeSpec. +func (in *ProbeSpec) DeepCopy() *ProbeSpec { + if in == nil { + return nil + } + out := new(ProbeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeTargetIngress) DeepCopyInto(out *ProbeTargetIngress) { + *out = *in + in.Selector.DeepCopyInto(&out.Selector) + in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector) + if in.RelabelConfigs != nil { + in, out := &in.RelabelConfigs, &out.RelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeTargetIngress. 
+func (in *ProbeTargetIngress) DeepCopy() *ProbeTargetIngress { + if in == nil { + return nil + } + out := new(ProbeTargetIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeTargetStaticConfig) DeepCopyInto(out *ProbeTargetStaticConfig) { + *out = *in + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.RelabelConfigs != nil { + in, out := &in.RelabelConfigs, &out.RelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeTargetStaticConfig. +func (in *ProbeTargetStaticConfig) DeepCopy() *ProbeTargetStaticConfig { + if in == nil { + return nil + } + out := new(ProbeTargetStaticConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeTargets) DeepCopyInto(out *ProbeTargets) { + *out = *in + if in.StaticConfig != nil { + in, out := &in.StaticConfig, &out.StaticConfig + *out = new(ProbeTargetStaticConfig) + (*in).DeepCopyInto(*out) + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = new(ProbeTargetIngress) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeTargets. +func (in *ProbeTargets) DeepCopy() *ProbeTargets { + if in == nil { + return nil + } + out := new(ProbeTargets) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ProberSpec) DeepCopyInto(out *ProberSpec) { + *out = *in + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProberSpec. +func (in *ProberSpec) DeepCopy() *ProberSpec { + if in == nil { + return nil + } + out := new(ProberSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Prometheus) DeepCopyInto(out *Prometheus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus. +func (in *Prometheus) DeepCopy() *Prometheus { + if in == nil { + return nil + } + out := new(Prometheus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusList) DeepCopyInto(out *PrometheusList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Prometheus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusList. +func (in *PrometheusList) DeepCopy() *PrometheusList { + if in == nil { + return nil + } + out := new(PrometheusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrometheusRule) DeepCopyInto(out *PrometheusRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRule. +func (in *PrometheusRule) DeepCopy() *PrometheusRule { + if in == nil { + return nil + } + out := new(PrometheusRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusRuleExcludeConfig) DeepCopyInto(out *PrometheusRuleExcludeConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRuleExcludeConfig. +func (in *PrometheusRuleExcludeConfig) DeepCopy() *PrometheusRuleExcludeConfig { + if in == nil { + return nil + } + out := new(PrometheusRuleExcludeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusRuleList) DeepCopyInto(out *PrometheusRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PrometheusRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRuleList. +func (in *PrometheusRuleList) DeepCopy() *PrometheusRuleList { + if in == nil { + return nil + } + out := new(PrometheusRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrometheusRuleSpec) DeepCopyInto(out *PrometheusRuleSpec) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]RuleGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRuleSpec. +func (in *PrometheusRuleSpec) DeepCopy() *PrometheusRuleSpec { + if in == nil { + return nil + } + out := new(PrometheusRuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) { + *out = *in + in.CommonPrometheusFields.DeepCopyInto(&out.CommonPrometheusFields) + if in.ShardRetentionPolicy != nil { + in, out := &in.ShardRetentionPolicy, &out.ShardRetentionPolicy + *out = new(ShardRetentionPolicy) + (*in).DeepCopyInto(*out) + } + out.Rules = in.Rules + if in.PrometheusRulesExcludedFromEnforce != nil { + in, out := &in.PrometheusRulesExcludedFromEnforce, &out.PrometheusRulesExcludedFromEnforce + *out = make([]PrometheusRuleExcludeConfig, len(*in)) + copy(*out, *in) + } + if in.RuleSelector != nil { + in, out := &in.RuleSelector, &out.RuleSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.RuleNamespaceSelector != nil { + in, out := &in.RuleNamespaceSelector, &out.RuleNamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(QuerySpec) + (*in).DeepCopyInto(*out) + } + if in.Alerting != nil { + in, out := &in.Alerting, &out.Alerting + *out = new(AlertingSpec) + (*in).DeepCopyInto(*out) + } + if in.AdditionalAlertRelabelConfigs != nil { + in, out := &in.AdditionalAlertRelabelConfigs, &out.AdditionalAlertRelabelConfigs + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if 
in.AdditionalAlertManagerConfigs != nil { + in, out := &in.AdditionalAlertManagerConfigs, &out.AdditionalAlertManagerConfigs + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.RemoteRead != nil { + in, out := &in.RemoteRead, &out.RemoteRead + *out = make([]RemoteReadSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Thanos != nil { + in, out := &in.Thanos, &out.Thanos + *out = new(ThanosSpec) + (*in).DeepCopyInto(*out) + } + if in.Exemplars != nil { + in, out := &in.Exemplars, &out.Exemplars + *out = new(Exemplars) + (*in).DeepCopyInto(*out) + } + if in.RuleQueryOffset != nil { + in, out := &in.RuleQueryOffset, &out.RuleQueryOffset + *out = new(Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusSpec. +func (in *PrometheusSpec) DeepCopy() *PrometheusSpec { + if in == nil { + return nil + } + out := new(PrometheusSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusStatus) DeepCopyInto(out *PrometheusStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShardStatuses != nil { + in, out := &in.ShardStatuses, &out.ShardStatuses + *out = make([]ShardStatus, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusStatus. +func (in *PrometheusStatus) DeepCopy() *PrometheusStatus { + if in == nil { + return nil + } + out := new(PrometheusStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrometheusTracingConfig) DeepCopyInto(out *PrometheusTracingConfig) { + *out = *in + if in.ClientType != nil { + in, out := &in.ClientType, &out.ClientType + *out = new(string) + **out = **in + } + if in.SamplingFraction != nil { + in, out := &in.SamplingFraction, &out.SamplingFraction + x := (*in).DeepCopy() + *out = &x + } + if in.Insecure != nil { + in, out := &in.Insecure, &out.Insecure + *out = new(bool) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(Duration) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusTracingConfig. +func (in *PrometheusTracingConfig) DeepCopy() *PrometheusTracingConfig { + if in == nil { + return nil + } + out := new(PrometheusTracingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusWebSpec) DeepCopyInto(out *PrometheusWebSpec) { + *out = *in + in.WebConfigFileFields.DeepCopyInto(&out.WebConfigFileFields) + if in.PageTitle != nil { + in, out := &in.PageTitle, &out.PageTitle + *out = new(string) + **out = **in + } + if in.MaxConnections != nil { + in, out := &in.MaxConnections, &out.MaxConnections + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusWebSpec. 
+func (in *PrometheusWebSpec) DeepCopy() *PrometheusWebSpec { + if in == nil { + return nil + } + out := new(PrometheusWebSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { + *out = *in + if in.ProxyURL != nil { + in, out := &in.ProxyURL, &out.ProxyURL + *out = new(string) + **out = **in + } + if in.NoProxy != nil { + in, out := &in.NoProxy, &out.NoProxy + *out = new(string) + **out = **in + } + if in.ProxyFromEnvironment != nil { + in, out := &in.ProxyFromEnvironment, &out.ProxyFromEnvironment + *out = new(bool) + **out = **in + } + if in.ProxyConnectHeader != nil { + in, out := &in.ProxyConnectHeader, &out.ProxyConnectHeader + *out = make(map[string][]corev1.SecretKeySelector, len(*in)) + for key, val := range *in { + var outVal []corev1.SecretKeySelector + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make([]corev1.SecretKeySelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. +func (in *ProxyConfig) DeepCopy() *ProxyConfig { + if in == nil { + return nil + } + out := new(ProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QuerySpec) DeepCopyInto(out *QuerySpec) { + *out = *in + if in.LookbackDelta != nil { + in, out := &in.LookbackDelta, &out.LookbackDelta + *out = new(string) + **out = **in + } + if in.MaxConcurrency != nil { + in, out := &in.MaxConcurrency, &out.MaxConcurrency + *out = new(int32) + **out = **in + } + if in.MaxSamples != nil { + in, out := &in.MaxSamples, &out.MaxSamples + *out = new(int32) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuerySpec. +func (in *QuerySpec) DeepCopy() *QuerySpec { + if in == nil { + return nil + } + out := new(QuerySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueConfig) DeepCopyInto(out *QueueConfig) { + *out = *in + if in.BatchSendDeadline != nil { + in, out := &in.BatchSendDeadline, &out.BatchSendDeadline + *out = new(Duration) + **out = **in + } + if in.MinBackoff != nil { + in, out := &in.MinBackoff, &out.MinBackoff + *out = new(Duration) + **out = **in + } + if in.MaxBackoff != nil { + in, out := &in.MaxBackoff, &out.MaxBackoff + *out = new(Duration) + **out = **in + } + if in.SampleAgeLimit != nil { + in, out := &in.SampleAgeLimit, &out.SampleAgeLimit + *out = new(Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueConfig. +func (in *QueueConfig) DeepCopy() *QueueConfig { + if in == nil { + return nil + } + out := new(QueueConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RelabelConfig) DeepCopyInto(out *RelabelConfig) { + *out = *in + if in.SourceLabels != nil { + in, out := &in.SourceLabels, &out.SourceLabels + *out = make([]LabelName, len(*in)) + copy(*out, *in) + } + if in.Separator != nil { + in, out := &in.Separator, &out.Separator + *out = new(string) + **out = **in + } + if in.Replacement != nil { + in, out := &in.Replacement, &out.Replacement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelabelConfig. +func (in *RelabelConfig) DeepCopy() *RelabelConfig { + if in == nil { + return nil + } + out := new(RelabelConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteReadSpec) DeepCopyInto(out *RemoteReadSpec) { + *out = *in + if in.RequiredMatchers != nil { + in, out := &in.RequiredMatchers, &out.RequiredMatchers + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.RemoteTimeout != nil { + in, out := &in.RemoteTimeout, &out.RemoteTimeout + *out = new(Duration) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(OAuth2) + (*in).DeepCopyInto(*out) + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(Authorization) + (*in).DeepCopyInto(*out) + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, 
&out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.FilterExternalLabels != nil { + in, out := &in.FilterExternalLabels, &out.FilterExternalLabels + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteReadSpec. +func (in *RemoteReadSpec) DeepCopy() *RemoteReadSpec { + if in == nil { + return nil + } + out := new(RemoteReadSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteWriteSpec) DeepCopyInto(out *RemoteWriteSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.MessageVersion != nil { + in, out := &in.MessageVersion, &out.MessageVersion + *out = new(RemoteWriteMessageVersion) + **out = **in + } + if in.SendExemplars != nil { + in, out := &in.SendExemplars, &out.SendExemplars + *out = new(bool) + **out = **in + } + if in.SendNativeHistograms != nil { + in, out := &in.SendNativeHistograms, &out.SendNativeHistograms + *out = new(bool) + **out = **in + } + if in.RemoteTimeout != nil { + in, out := &in.RemoteTimeout, &out.RemoteTimeout + *out = new(Duration) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.WriteRelabelConfigs != nil { + in, out := &in.WriteRelabelConfigs, &out.WriteRelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(OAuth2) + (*in).DeepCopyInto(*out) + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = 
new(Authorization) + (*in).DeepCopyInto(*out) + } + if in.Sigv4 != nil { + in, out := &in.Sigv4, &out.Sigv4 + *out = new(Sigv4) + (*in).DeepCopyInto(*out) + } + if in.AzureAD != nil { + in, out := &in.AzureAD, &out.AzureAD + *out = new(AzureAD) + (*in).DeepCopyInto(*out) + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.QueueConfig != nil { + in, out := &in.QueueConfig, &out.QueueConfig + *out = new(QueueConfig) + (*in).DeepCopyInto(*out) + } + if in.MetadataConfig != nil { + in, out := &in.MetadataConfig, &out.MetadataConfig + *out = new(MetadataConfig) + (*in).DeepCopyInto(*out) + } + if in.EnableHttp2 != nil { + in, out := &in.EnableHttp2, &out.EnableHttp2 + *out = new(bool) + **out = **in + } + if in.RoundRobinDNS != nil { + in, out := &in.RoundRobinDNS, &out.RoundRobinDNS + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteWriteSpec. +func (in *RemoteWriteSpec) DeepCopy() *RemoteWriteSpec { + if in == nil { + return nil + } + out := new(RemoteWriteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetainConfig) DeepCopyInto(out *RetainConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetainConfig. +func (in *RetainConfig) DeepCopy() *RetainConfig { + if in == nil { + return nil + } + out := new(RetainConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Rule) DeepCopyInto(out *Rule) { + *out = *in + out.Expr = in.Expr + if in.For != nil { + in, out := &in.For, &out.For + *out = new(Duration) + **out = **in + } + if in.KeepFiringFor != nil { + in, out := &in.KeepFiringFor, &out.KeepFiringFor + *out = new(NonEmptyDuration) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. +func (in *Rule) DeepCopy() *Rule { + if in == nil { + return nil + } + out := new(Rule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroup) DeepCopyInto(out *RuleGroup) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(Duration) + **out = **in + } + if in.QueryOffset != nil { + in, out := &in.QueryOffset, &out.QueryOffset + *out = new(Duration) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]Rule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Limit != nil { + in, out := &in.Limit, &out.Limit + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroup. 
+func (in *RuleGroup) DeepCopy() *RuleGroup { + if in == nil { + return nil + } + out := new(RuleGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rules) DeepCopyInto(out *Rules) { + *out = *in + out.Alert = in.Alert +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rules. +func (in *Rules) DeepCopy() *Rules { + if in == nil { + return nil + } + out := new(Rules) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RulesAlert) DeepCopyInto(out *RulesAlert) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesAlert. +func (in *RulesAlert) DeepCopy() *RulesAlert { + if in == nil { + return nil + } + out := new(RulesAlert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeConfig) DeepCopyInto(out *RuntimeConfig) { + *out = *in + if in.GoGC != nil { + in, out := &in.GoGC, &out.GoGC + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeConfig. +func (in *RuntimeConfig) DeepCopy() *RuntimeConfig { + if in == nil { + return nil + } + out := new(RuntimeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SafeAuthorization) DeepCopyInto(out *SafeAuthorization) { + *out = *in + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SafeAuthorization. +func (in *SafeAuthorization) DeepCopy() *SafeAuthorization { + if in == nil { + return nil + } + out := new(SafeAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SafeTLSConfig) DeepCopyInto(out *SafeTLSConfig) { + *out = *in + in.CA.DeepCopyInto(&out.CA) + in.Cert.DeepCopyInto(&out.Cert) + if in.KeySecret != nil { + in, out := &in.KeySecret, &out.KeySecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ServerName != nil { + in, out := &in.ServerName, &out.ServerName + *out = new(string) + **out = **in + } + if in.InsecureSkipVerify != nil { + in, out := &in.InsecureSkipVerify, &out.InsecureSkipVerify + *out = new(bool) + **out = **in + } + if in.MinVersion != nil { + in, out := &in.MinVersion, &out.MinVersion + *out = new(TLSVersion) + **out = **in + } + if in.MaxVersion != nil { + in, out := &in.MaxVersion, &out.MaxVersion + *out = new(TLSVersion) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SafeTLSConfig. +func (in *SafeTLSConfig) DeepCopy() *SafeTLSConfig { + if in == nil { + return nil + } + out := new(SafeTLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScrapeClass) DeepCopyInto(out *ScrapeClass) { + *out = *in + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(bool) + **out = **in + } + if in.FallbackScrapeProtocol != nil { + in, out := &in.FallbackScrapeProtocol, &out.FallbackScrapeProtocol + *out = new(ScrapeProtocol) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(Authorization) + (*in).DeepCopyInto(*out) + } + if in.Relabelings != nil { + in, out := &in.Relabelings, &out.Relabelings + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricRelabelings != nil { + in, out := &in.MetricRelabelings, &out.MetricRelabelings + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AttachMetadata != nil { + in, out := &in.AttachMetadata, &out.AttachMetadata + *out = new(AttachMetadata) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScrapeClass. +func (in *ScrapeClass) DeepCopy() *ScrapeClass { + if in == nil { + return nil + } + out := new(ScrapeClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretOrConfigMap) DeepCopyInto(out *SecretOrConfigMap) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(corev1.ConfigMapKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretOrConfigMap. 
+func (in *SecretOrConfigMap) DeepCopy() *SecretOrConfigMap { + if in == nil { + return nil + } + out := new(SecretOrConfigMap) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceMonitor) DeepCopyInto(out *ServiceMonitor) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitor. +func (in *ServiceMonitor) DeepCopy() *ServiceMonitor { + if in == nil { + return nil + } + out := new(ServiceMonitor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceMonitorList) DeepCopyInto(out *ServiceMonitorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceMonitor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitorList. +func (in *ServiceMonitorList) DeepCopy() *ServiceMonitorList { + if in == nil { + return nil + } + out := new(ServiceMonitorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceMonitorSpec) DeepCopyInto(out *ServiceMonitorSpec) { + *out = *in + if in.TargetLabels != nil { + in, out := &in.TargetLabels, &out.TargetLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PodTargetLabels != nil { + in, out := &in.PodTargetLabels, &out.PodTargetLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Selector.DeepCopyInto(&out.Selector) + if in.SelectorMechanism != nil { + in, out := &in.SelectorMechanism, &out.SelectorMechanism + *out = new(SelectorMechanism) + **out = **in + } + in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector) + if in.SampleLimit != nil { + in, out := &in.SampleLimit, &out.SampleLimit + *out = new(uint64) + **out = **in + } + if in.ScrapeProtocols != nil { + in, out := &in.ScrapeProtocols, &out.ScrapeProtocols + *out = make([]ScrapeProtocol, len(*in)) + copy(*out, *in) + } + if in.FallbackScrapeProtocol != nil { + in, out := &in.FallbackScrapeProtocol, &out.FallbackScrapeProtocol + *out = new(ScrapeProtocol) + **out = **in + } + if in.TargetLimit != nil { + in, out := &in.TargetLimit, &out.TargetLimit + *out = new(uint64) + **out = **in + } + if in.LabelLimit != nil { + in, out := &in.LabelLimit, &out.LabelLimit + *out = new(uint64) + **out = **in + } + if in.LabelNameLengthLimit != nil { + in, out := &in.LabelNameLengthLimit, &out.LabelNameLengthLimit + *out = new(uint64) + **out = **in + } + if in.LabelValueLengthLimit != nil { + in, out := &in.LabelValueLengthLimit, &out.LabelValueLengthLimit + *out = new(uint64) + **out = **in + } + in.NativeHistogramConfig.DeepCopyInto(&out.NativeHistogramConfig) + if in.KeepDroppedTargets != nil { + in, out := &in.KeepDroppedTargets, &out.KeepDroppedTargets + *out = new(uint64) + **out = **in + } + if in.AttachMetadata != nil { + in, out := &in.AttachMetadata, 
&out.AttachMetadata + *out = new(AttachMetadata) + (*in).DeepCopyInto(*out) + } + if in.ScrapeClassName != nil { + in, out := &in.ScrapeClassName, &out.ScrapeClassName + *out = new(string) + **out = **in + } + if in.BodySizeLimit != nil { + in, out := &in.BodySizeLimit, &out.BodySizeLimit + *out = new(ByteSize) + **out = **in + } + if in.ServiceDiscoveryRole != nil { + in, out := &in.ServiceDiscoveryRole, &out.ServiceDiscoveryRole + *out = new(ServiceDiscoveryRole) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitorSpec. +func (in *ServiceMonitorSpec) DeepCopy() *ServiceMonitorSpec { + if in == nil { + return nil + } + out := new(ServiceMonitorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShardRetentionPolicy) DeepCopyInto(out *ShardRetentionPolicy) { + *out = *in + if in.WhenScaled != nil { + in, out := &in.WhenScaled, &out.WhenScaled + *out = new(WhenScaledRetentionType) + **out = **in + } + if in.Retain != nil { + in, out := &in.Retain, &out.Retain + *out = new(RetainConfig) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardRetentionPolicy. +func (in *ShardRetentionPolicy) DeepCopy() *ShardRetentionPolicy { + if in == nil { + return nil + } + out := new(ShardRetentionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShardStatus) DeepCopyInto(out *ShardStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardStatus. 
+func (in *ShardStatus) DeepCopy() *ShardStatus { + if in == nil { + return nil + } + out := new(ShardStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Sigv4) DeepCopyInto(out *Sigv4) { + *out = *in + if in.AccessKey != nil { + in, out := &in.AccessKey, &out.AccessKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.SecretKey != nil { + in, out := &in.SecretKey, &out.SecretKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.UseFIPSSTSEndpoint != nil { + in, out := &in.UseFIPSSTSEndpoint, &out.UseFIPSSTSEndpoint + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sigv4. +func (in *Sigv4) DeepCopy() *Sigv4 { + if in == nil { + return nil + } + out := new(Sigv4) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { + *out = *in + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(corev1.EmptyDirVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Ephemeral != nil { + in, out := &in.Ephemeral, &out.Ephemeral + *out = new(corev1.EphemeralVolumeSource) + (*in).DeepCopyInto(*out) + } + in.VolumeClaimTemplate.DeepCopyInto(&out.VolumeClaimTemplate) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. +func (in *StorageSpec) DeepCopy() *StorageSpec { + if in == nil { + return nil + } + out := new(StorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { + *out = *in + in.SafeTLSConfig.DeepCopyInto(&out.SafeTLSConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. +func (in *TLSConfig) DeepCopy() *TLSConfig { + if in == nil { + return nil + } + out := new(TLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TSDBSpec) DeepCopyInto(out *TSDBSpec) { + *out = *in + if in.OutOfOrderTimeWindow != nil { + in, out := &in.OutOfOrderTimeWindow, &out.OutOfOrderTimeWindow + *out = new(Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TSDBSpec. +func (in *TSDBSpec) DeepCopy() *TSDBSpec { + if in == nil { + return nil + } + out := new(TSDBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThanosRuler) DeepCopyInto(out *ThanosRuler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosRuler. +func (in *ThanosRuler) DeepCopy() *ThanosRuler { + if in == nil { + return nil + } + out := new(ThanosRuler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThanosRulerList) DeepCopyInto(out *ThanosRulerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ThanosRuler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosRulerList. +func (in *ThanosRulerList) DeepCopy() *ThanosRulerList { + if in == nil { + return nil + } + out := new(ThanosRulerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThanosRulerSpec) DeepCopyInto(out *ThanosRulerSpec) { + *out = *in + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.PodMetadata != nil { + in, out := &in.PodMetadata, &out.PodMetadata + *out = new(EmbeddedObjectMetadata) + (*in).DeepCopyInto(*out) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = 
make([]corev1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.DNSPolicy != nil { + in, out := &in.DNSPolicy, &out.DNSPolicy + *out = new(DNSPolicy) + **out = **in + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(PodDNSConfig) + (*in).DeepCopyInto(*out) + } + if in.EnableServiceLinks != nil { + in, out := &in.EnableServiceLinks, &out.EnableServiceLinks + *out = new(bool) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(StorageSpec) + (*in).DeepCopyInto(*out) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]corev1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ObjectStorageConfig != nil { + in, out := &in.ObjectStorageConfig, &out.ObjectStorageConfig + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectStorageConfigFile != nil { + in, out := &in.ObjectStorageConfigFile, &out.ObjectStorageConfigFile + *out = new(string) + **out = **in + } + if in.QueryEndpoints != nil { + in, out := &in.QueryEndpoints, &out.QueryEndpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.QueryConfig != nil { + in, out := &in.QueryConfig, &out.QueryConfig + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.AlertManagersURL != nil { + in, out := &in.AlertManagersURL, &out.AlertManagersURL + *out = make([]string, len(*in)) + copy(*out, 
*in) + } + if in.AlertManagersConfig != nil { + in, out := &in.AlertManagersConfig, &out.AlertManagersConfig + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.RuleSelector != nil { + in, out := &in.RuleSelector, &out.RuleSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.RuleNamespaceSelector != nil { + in, out := &in.RuleNamespaceSelector, &out.RuleNamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ExcludedFromEnforcement != nil { + in, out := &in.ExcludedFromEnforcement, &out.ExcludedFromEnforcement + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.PrometheusRulesExcludedFromEnforce != nil { + in, out := &in.PrometheusRulesExcludedFromEnforce, &out.PrometheusRulesExcludedFromEnforce + *out = make([]PrometheusRuleExcludeConfig, len(*in)) + copy(*out, *in) + } + if in.ResendDelay != nil { + in, out := &in.ResendDelay, &out.ResendDelay + *out = new(Duration) + **out = **in + } + if in.RuleOutageTolerance != nil { + in, out := &in.RuleOutageTolerance, &out.RuleOutageTolerance + *out = new(Duration) + **out = **in + } + if in.RuleQueryOffset != nil { + in, out := &in.RuleQueryOffset, &out.RuleQueryOffset + *out = new(Duration) + **out = **in + } + if in.RuleConcurrentEval != nil { + in, out := &in.RuleConcurrentEval, &out.RuleConcurrentEval + *out = new(int32) + **out = **in + } + if in.RuleGracePeriod != nil { + in, out := &in.RuleGracePeriod, &out.RuleGracePeriod + *out = new(Duration) + **out = **in + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TracingConfig != nil { + in, out := &in.TracingConfig, 
&out.TracingConfig + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AlertDropLabels != nil { + in, out := &in.AlertDropLabels, &out.AlertDropLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GRPCServerTLSConfig != nil { + in, out := &in.GRPCServerTLSConfig, &out.GRPCServerTLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + if in.MinReadySeconds != nil { + in, out := &in.MinReadySeconds, &out.MinReadySeconds + *out = new(int32) + **out = **in + } + if in.AlertRelabelConfigs != nil { + in, out := &in.AlertRelabelConfigs, &out.AlertRelabelConfigs + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.AlertRelabelConfigFile != nil { + in, out := &in.AlertRelabelConfigFile, &out.AlertRelabelConfigFile + *out = new(string) + **out = **in + } + if in.HostAliases != nil { + in, out := &in.HostAliases, &out.HostAliases + *out = make([]HostAlias, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalArgs != nil { + in, out := &in.AdditionalArgs, &out.AdditionalArgs + *out = make([]Argument, len(*in)) + copy(*out, *in) + } + if in.Web != nil { + in, out := &in.Web, &out.Web + *out = new(ThanosRulerWebSpec) + (*in).DeepCopyInto(*out) + } + if in.RemoteWrite != nil { + in, out := &in.RemoteWrite, &out.RemoteWrite + *out = make([]RemoteWriteSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.EnableFeatures != nil { + in, out := &in.EnableFeatures, &out.EnableFeatures + *out = make([]EnableFeature, len(*in)) + copy(*out, *in) + } + if in.HostUsers != nil { + in, out := &in.HostUsers, 
&out.HostUsers + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosRulerSpec. +func (in *ThanosRulerSpec) DeepCopy() *ThanosRulerSpec { + if in == nil { + return nil + } + out := new(ThanosRulerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThanosRulerStatus) DeepCopyInto(out *ThanosRulerStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosRulerStatus. +func (in *ThanosRulerStatus) DeepCopy() *ThanosRulerStatus { + if in == nil { + return nil + } + out := new(ThanosRulerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThanosRulerWebSpec) DeepCopyInto(out *ThanosRulerWebSpec) { + *out = *in + in.WebConfigFileFields.DeepCopyInto(&out.WebConfigFileFields) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosRulerWebSpec. +func (in *ThanosRulerWebSpec) DeepCopy() *ThanosRulerWebSpec { + if in == nil { + return nil + } + out := new(ThanosRulerWebSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThanosSpec) DeepCopyInto(out *ThanosSpec) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.SHA != nil { + in, out := &in.SHA, &out.SHA + *out = new(string) + **out = **in + } + if in.BaseImage != nil { + in, out := &in.BaseImage, &out.BaseImage + *out = new(string) + **out = **in + } + in.Resources.DeepCopyInto(&out.Resources) + if in.ObjectStorageConfig != nil { + in, out := &in.ObjectStorageConfig, &out.ObjectStorageConfig + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectStorageConfigFile != nil { + in, out := &in.ObjectStorageConfigFile, &out.ObjectStorageConfigFile + *out = new(string) + **out = **in + } + if in.TracingConfig != nil { + in, out := &in.TracingConfig, &out.TracingConfig + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.GRPCServerTLSConfig != nil { + in, out := &in.GRPCServerTLSConfig, &out.GRPCServerTLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalArgs != nil { + in, out := &in.AdditionalArgs, &out.AdditionalArgs + *out = make([]Argument, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosSpec. +func (in *ThanosSpec) DeepCopy() *ThanosSpec { + if in == nil { + return nil + } + out := new(ThanosSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) { + *out = *in + in.CoreV1TopologySpreadConstraint.DeepCopyInto(&out.CoreV1TopologySpreadConstraint) + if in.AdditionalLabelSelectors != nil { + in, out := &in.AdditionalLabelSelectors, &out.AdditionalLabelSelectors + *out = new(AdditionalLabelSelectors) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint. +func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint { + if in == nil { + return nil + } + out := new(TopologySpreadConstraint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebConfigFileFields) DeepCopyInto(out *WebConfigFileFields) { + *out = *in + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(WebTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(WebHTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebConfigFileFields. +func (in *WebConfigFileFields) DeepCopy() *WebConfigFileFields { + if in == nil { + return nil + } + out := new(WebConfigFileFields) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebHTTPConfig) DeepCopyInto(out *WebHTTPConfig) { + *out = *in + if in.HTTP2 != nil { + in, out := &in.HTTP2, &out.HTTP2 + *out = new(bool) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = new(WebHTTPHeaders) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebHTTPConfig. 
+func (in *WebHTTPConfig) DeepCopy() *WebHTTPConfig { + if in == nil { + return nil + } + out := new(WebHTTPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebHTTPHeaders) DeepCopyInto(out *WebHTTPHeaders) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebHTTPHeaders. +func (in *WebHTTPHeaders) DeepCopy() *WebHTTPHeaders { + if in == nil { + return nil + } + out := new(WebHTTPHeaders) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebTLSConfig) DeepCopyInto(out *WebTLSConfig) { + *out = *in + in.Cert.DeepCopyInto(&out.Cert) + if in.CertFile != nil { + in, out := &in.CertFile, &out.CertFile + *out = new(string) + **out = **in + } + in.KeySecret.DeepCopyInto(&out.KeySecret) + if in.KeyFile != nil { + in, out := &in.KeyFile, &out.KeyFile + *out = new(string) + **out = **in + } + in.ClientCA.DeepCopyInto(&out.ClientCA) + if in.ClientCAFile != nil { + in, out := &in.ClientCAFile, &out.ClientCAFile + *out = new(string) + **out = **in + } + if in.ClientAuthType != nil { + in, out := &in.ClientAuthType, &out.ClientAuthType + *out = new(string) + **out = **in + } + if in.MinVersion != nil { + in, out := &in.MinVersion, &out.MinVersion + *out = new(string) + **out = **in + } + if in.MaxVersion != nil { + in, out := &in.MaxVersion, &out.MaxVersion + *out = new(string) + **out = **in + } + if in.CipherSuites != nil { + in, out := &in.CipherSuites, &out.CipherSuites + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferServerCipherSuites != nil { + in, out := &in.PreferServerCipherSuites, &out.PreferServerCipherSuites + *out = new(bool) + **out = **in + } + if in.CurvePreferences != nil { + in, out := &in.CurvePreferences, &out.CurvePreferences + *out = 
make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebTLSConfig. +func (in *WebTLSConfig) DeepCopy() *WebTLSConfig { + if in == nil { + return nil + } + out := new(WebTLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkloadBinding) DeepCopyInto(out *WorkloadBinding) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ConfigResourceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadBinding. +func (in *WorkloadBinding) DeepCopy() *WorkloadBinding { + if in == nil { + return nil + } + out := new(WorkloadBinding) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md index d45cbe1720..a680beb405 100644 --- a/vendor/k8s.io/klog/v2/README.md +++ b/vendor/k8s.io/klog/v2/README.md @@ -48,8 +48,6 @@ How to use klog - For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) - See our documentation on [pkg.go.dev/k8s.io](https://pkg.go.dev/k8s.io/klog). -**NOTE**: please use the newer go versions that support semantic import versioning in modules, ideally go 1.11.4 or greater. - ### Coexisting with klog/v2 See [this example](examples/coexist_klog_v1_and_v2/) to see how to coexist with both klog/v1 and klog/v2. 
diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index d1a4751c94..73f91ea500 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -20,7 +20,9 @@ import ( "bytes" "encoding/json" "fmt" + "slices" "strconv" + "strings" "github.com/go-logr/logr" ) @@ -51,139 +53,157 @@ func WithValues(oldKV, newKV []interface{}) []interface{} { return kv } -// MergeKVs deduplicates elements provided in two key/value slices. -// -// Keys in each slice are expected to be unique, so duplicates can only occur -// when the first and second slice contain the same key. When that happens, the -// key/value pair from the second slice is used. The first slice must be well-formed -// (= even key/value pairs). The second one may have a missing value, in which -// case the special "missing value" is added to the result. -func MergeKVs(first, second []interface{}) []interface{} { - maxLength := len(first) + (len(second)+1)/2*2 - if maxLength == 0 { - // Nothing to do at all. - return nil - } - - if len(first) == 0 && len(second)%2 == 0 { - // Nothing to be overridden, second slice is well-formed - // and can be used directly. - return second - } - - // Determine which keys are in the second slice so that we can skip - // them when iterating over the first one. The code intentionally - // favors performance over completeness: we assume that keys are string - // constants and thus compare equal when the string values are equal. A - // string constant being overridden by, for example, a fmt.Stringer is - // not handled. - overrides := map[interface{}]bool{} - for i := 0; i < len(second); i += 2 { - overrides[second[i]] = true - } - merged := make([]interface{}, 0, maxLength) - for i := 0; i+1 < len(first); i += 2 { - key := first[i] - if overrides[key] { - continue - } - merged = append(merged, key, first[i+1]) - } - merged = append(merged, second...) 
- if len(merged)%2 != 0 { - merged = append(merged, missingValue) - } - return merged -} - type Formatter struct { AnyToStringHook AnyToStringFunc } type AnyToStringFunc func(v interface{}) string -// MergeKVsInto is a variant of MergeKVs which directly formats the key/value -// pairs into a buffer. -func (f Formatter) MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { - if len(first) == 0 && len(second) == 0 { - // Nothing to do at all. - return - } +const missingValue = "(MISSING)" - if len(first) == 0 && len(second)%2 == 0 { - // Nothing to be overridden, second slice is well-formed - // and can be used directly. - for i := 0; i < len(second); i += 2 { - f.KVFormat(b, second[i], second[i+1]) - } - return - } +func FormatKVs(b *bytes.Buffer, kvs ...[]interface{}) { + Formatter{}.FormatKVs(b, kvs...) +} - // Determine which keys are in the second slice so that we can skip - // them when iterating over the first one. The code intentionally - // favors performance over completeness: we assume that keys are string - // constants and thus compare equal when the string values are equal. A - // string constant being overridden by, for example, a fmt.Stringer is - // not handled. - overrides := map[interface{}]bool{} - for i := 0; i < len(second); i += 2 { - overrides[second[i]] = true - } - for i := 0; i < len(first); i += 2 { - key := first[i] - if overrides[key] { - continue +// FormatKVs formats all key/value pairs such that the output contains no +// duplicates ("last one wins"). +func (f Formatter) FormatKVs(b *bytes.Buffer, kvs ...[]interface{}) { + // De-duplication is done by optimistically formatting all key value + // pairs and then cutting out the output of those key/value pairs which + // got overwritten later. + // + // In the common case of no duplicates, the only overhead is tracking + // previous keys. 
This uses a slice with a simple linear search because + // the number of entries is typically so low that allocating a map or + // keeping a sorted slice with binary search aren't justified. + // + // Using a fixed size here makes the Go compiler use the stack as + // initial backing store for the slice, which is crucial for + // performance. + existing := make([]obsoleteKV, 0, 32) + obsolete := make([]interval, 0, 32) // Sorted by start index. + for _, keysAndValues := range kvs { + for i := 0; i < len(keysAndValues); i += 2 { + var v interface{} + k := keysAndValues[i] + if i+1 < len(keysAndValues) { + v = keysAndValues[i+1] + } else { + v = missingValue + } + var e obsoleteKV + e.start = b.Len() + e.key = f.KVFormat(b, k, v) + e.end = b.Len() + i := findObsoleteEntry(existing, e.key) + if i >= 0 { + data := b.Bytes() + if bytes.Compare(data[existing[i].start:existing[i].end], data[e.start:e.end]) == 0 { + // The new entry gets obsoleted because it's identical. + // This has the advantage that key/value pairs from + // a WithValues call always come first, even if the same + // pair gets added again later. This makes different log + // entries more consistent. + // + // The new entry has a higher start index and thus can be appended. + obsolete = append(obsolete, e.interval) + } else { + // The old entry gets obsoleted because it's value is different. + // + // Sort order is not guaranteed, we have to insert at the right place. + index, _ := slices.BinarySearchFunc(obsolete, existing[i].interval, func(a, b interval) int { return a.start - b.start }) + obsolete = slices.Insert(obsolete, index, existing[i].interval) + existing[i].interval = e.interval + } + } else { + // Instead of appending at the end and doing a + // linear search in findEntry, we could keep + // the slice sorted by key and do a binary search. 
+ // + // Above: + // i, ok := slices.BinarySearchFunc(existing, e, func(a, b entry) int { return strings.Compare(a.key, b.key) }) + // Here: + // existing = slices.Insert(existing, i, e) + // + // But that adds a dependency on the slices package + // and made performance slightly worse, presumably + // because the cost of shifting entries around + // did not pay of with faster lookups. + existing = append(existing, e) + } } - f.KVFormat(b, key, first[i+1]) } - // Round down. - l := len(second) - l = l / 2 * 2 - for i := 1; i < l; i += 2 { - f.KVFormat(b, second[i-1], second[i]) - } - if len(second)%2 == 1 { - f.KVFormat(b, second[len(second)-1], missingValue) - } -} -func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { - Formatter{}.MergeAndFormatKVs(b, first, second) -} + // If we need to remove some obsolete key/value pairs then move the memory. + if len(obsolete) > 0 { + // Potentially the next remaining output (might itself be obsolete). + from := obsolete[0].end + // Next obsolete entry. + nextObsolete := 1 + // This is the source buffer, before truncation. + all := b.Bytes() + b.Truncate(obsolete[0].start) -const missingValue = "(MISSING)" + for nextObsolete < len(obsolete) { + if from == obsolete[nextObsolete].start { + // Skip also the next obsolete key/value. + from = obsolete[nextObsolete].end + nextObsolete++ + continue + } -// KVListFormat serializes all key/value pairs into the provided buffer. -// A space gets inserted before the first pair and between each pair. -func (f Formatter) KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { - for i := 0; i < len(keysAndValues); i += 2 { - var v interface{} - k := keysAndValues[i] - if i+1 < len(keysAndValues) { - v = keysAndValues[i+1] - } else { - v = missingValue + // Preserve some output. Write uses copy, which + // explicitly allows source and destination to overlap. + // That could happen here. 
+ valid := all[from:obsolete[nextObsolete].start] + b.Write(valid) + from = obsolete[nextObsolete].end + nextObsolete++ } - f.KVFormat(b, k, v) + // Copy end of buffer. + valid := all[from:] + b.Write(valid) } } -func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { - Formatter{}.KVListFormat(b, keysAndValues...) +type obsoleteKV struct { + key string + interval +} + +// interval includes the start and excludes the end. +type interval struct { + start int + end int } -func KVFormat(b *bytes.Buffer, k, v interface{}) { - Formatter{}.KVFormat(b, k, v) +func findObsoleteEntry(entries []obsoleteKV, key string) int { + for i, entry := range entries { + if entry.key == key { + return i + } + } + return -1 } // formatAny is the fallback formatter for a value. It supports a hook (for // example, for YAML encoding) and itself uses JSON encoding. func (f Formatter) formatAny(b *bytes.Buffer, v interface{}) { - b.WriteRune('=') if f.AnyToStringHook != nil { - b.WriteString(f.AnyToStringHook(v)) + str := f.AnyToStringHook(v) + if strings.Contains(str, "\n") { + // If it's multi-line, then pass it through writeStringValue to get start/end delimiters, + // which separates it better from any following key/value pair. + writeStringValue(b, str) + return + } + // Otherwise put it directly after the separator, on the same lime, + // The assumption is that the hook returns something where start/end are obvious. + b.WriteRune('=') + b.WriteString(str) return } + b.WriteRune('=') formatAsJSON(b, v) } diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go index d9c7d15467..b8c7e443d0 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go @@ -28,7 +28,7 @@ import ( // KVFormat serializes one key/value pair into the provided buffer. // A space gets inserted before the pair. 
-func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) string { // This is the version without slog support. Must be kept in sync with // the version in keyvalues_slog.go. @@ -37,13 +37,15 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments // for the sake of performance. Keys with spaces, // special characters, etc. will break parsing. + var key string if sK, ok := k.(string); ok { // Avoid one allocation when the key is a string, which // normally it should be. - b.WriteString(sK) + key = sK } else { - b.WriteString(fmt.Sprintf("%s", k)) + key = fmt.Sprintf("%s", k) } + b.WriteString(key) // The type checks are sorted so that more frequently used ones // come first because that is then faster in the common @@ -94,4 +96,6 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { default: f.formatAny(b, v) } + + return key } diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go index 89acf97723..8e00843645 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go @@ -29,8 +29,8 @@ import ( ) // KVFormat serializes one key/value pair into the provided buffer. -// A space gets inserted before the pair. -func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { +// A space gets inserted before the pair. It returns the key. +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) string { // This is the version without slog support. Must be kept in sync with // the version in keyvalues_slog.go. 
@@ -39,13 +39,15 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments // for the sake of performance. Keys with spaces, // special characters, etc. will break parsing. + var key string if sK, ok := k.(string); ok { // Avoid one allocation when the key is a string, which // normally it should be. - b.WriteString(sK) + key = sK } else { - b.WriteString(fmt.Sprintf("%s", k)) + key = fmt.Sprintf("%s", k) } + b.WriteString(key) // The type checks are sorted so that more frequently used ones // come first because that is then faster in the common @@ -112,6 +114,8 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { default: f.formatAny(b, v) } + + return key } // generateJSON has the same preference for plain strings as KVFormat. diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 47ec9466a6..319ffbe248 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -58,15 +58,30 @@ // // -logtostderr=true // Logs are written to standard error instead of to files. -// This shortcuts most of the usual output routing: -// -alsologtostderr, -stderrthreshold and -log_dir have no -// effect and output redirection at runtime with SetOutput is -// ignored. +// By default, all logs are written regardless of severity +// (legacy behavior). To filter logs by severity when +// -logtostderr=true, set -legacy_stderr_threshold_behavior=false +// and use -stderrthreshold. +// With -legacy_stderr_threshold_behavior=true, +// -stderrthreshold has no effect. +// +// The following flags always have no effect: +// -alsologtostderr, -alsologtostderrthreshold, and -log_dir. +// Output redirection at runtime with SetOutput is also ignored. // -alsologtostderr=false // Logs are written to standard error as well as to files. 
+// -alsologtostderrthreshold=INFO +// Log events at or above this severity are logged to standard +// error when -alsologtostderr=true (no effect when -logtostderr=true). +// Default is INFO to maintain backward compatibility. // -stderrthreshold=ERROR // Log events at or above this severity are logged to standard -// error as well as to files. +// error as well as to files. When -logtostderr=true, this flag +// has no effect unless -legacy_stderr_threshold_behavior=false. +// -legacy_stderr_threshold_behavior=true +// If true, -stderrthreshold is ignored when -logtostderr=true +// (legacy behavior). If false, -stderrthreshold is honored even +// when -logtostderr=true, allowing severity-based filtering. // -log_dir="" // Log files will be written to this directory instead of the // default temporary directory. @@ -156,7 +171,7 @@ func (s *severityValue) Set(value string) error { } threshold = severity.Severity(v) } - logging.stderrThreshold.set(threshold) + s.set(threshold) return nil } @@ -409,6 +424,15 @@ var commandLine flag.FlagSet // init sets up the defaults and creates command line flags. func init() { + // Initialize severity thresholds + logging.stderrThreshold = severityValue{ + Severity: severity.ErrorLog, // Default stderrThreshold is ERROR. + } + logging.alsologtostderrthreshold = severityValue{ + Severity: severity.InfoLog, // Default alsologtostderrthreshold is INFO (to maintain backward compatibility). 
+ } + logging.setVState(0, nil, false) + commandLine.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory (no effect when -logtostderr=true)") commandLine.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file (no effect when -logtostderr=true)") commandLine.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", 1800, @@ -416,16 +440,14 @@ func init() { "If the value is 0, the maximum file size is unlimited.") commandLine.BoolVar(&logging.toStderr, "logtostderr", true, "log to standard error instead of files") commandLine.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files (no effect when -logtostderr=true)") - logging.setVState(0, nil, false) + commandLine.BoolVar(&logging.legacyStderrThresholdBehavior, "legacy_stderr_threshold_behavior", true, "If true, stderrthreshold is ignored when logtostderr=true (legacy behavior). If false, stderrthreshold is honored even when logtostderr=true") commandLine.Var(&logging.verbosity, "v", "number for the log level verbosity") commandLine.BoolVar(&logging.addDirHeader, "add_dir_header", false, "If true, adds the file directory to the header of the log messages") commandLine.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages") commandLine.BoolVar(&logging.oneOutput, "one_output", false, "If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)") commandLine.BoolVar(&logging.skipLogHeaders, "skip_log_headers", false, "If true, avoid headers when opening log files (no effect when -logtostderr=true)") - logging.stderrThreshold = severityValue{ - Severity: severity.ErrorLog, // Default stderrThreshold is ERROR. 
- } - commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true)") + commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true unless -legacy_stderr_threshold_behavior=false)") + commandLine.Var(&logging.alsologtostderrthreshold, "alsologtostderrthreshold", "logs at or above this threshold go to stderr when -alsologtostderr=true (no effect when -logtostderr=true)") commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") @@ -470,11 +492,13 @@ type settings struct { // Boolean flags. Not handled atomically because the flag.Value interface // does not let us avoid the =true, and that shorthand is necessary for // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + legacyStderrThresholdBehavior bool // The -legacy_stderr_threshold_behavior flag. // Level flag. Handled atomically. - stderrThreshold severityValue // The -stderrthreshold flag. + stderrThreshold severityValue // The -stderrthreshold flag. + alsologtostderrthreshold severityValue // The -alsologtostderrthreshold flag. // Access to all of the following fields must be protected via a mutex. @@ -809,16 +833,21 @@ func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg str // printS is called from infoS and errorS if logger is not specified. 
// set log severity by s func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { - // Only create a new buffer if we don't have one cached. - b := buffer.GetBuffer() // The message is always quoted, even if it contains line breaks. // If developers want multi-line output, they should use a small, fixed // message and put the multi-line output into a value. - b.WriteString(strconv.Quote(msg)) + qMsg := make([]byte, 0, 1024) + qMsg = strconv.AppendQuote(qMsg, msg) + + // Only create a new buffer if we don't have one cached. + b := buffer.GetBuffer() + b.Write(qMsg) + + var errKV []interface{} if err != nil { - serialize.KVListFormat(&b.Buffer, "err", err) + errKV = []interface{}{"err", err} } - serialize.KVListFormat(&b.Buffer, keysAndValues...) + serialize.FormatKVs(&b.Buffer, errKV, keysAndValues) l.printDepth(s, nil, nil, depth+1, &b.Buffer) // Make the buffer available for reuse. buffer.PutBuffer(b) @@ -885,9 +914,25 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu } } } else if l.toStderr { - os.Stderr.Write(data) + // When logging to stderr only, check if we should filter by severity. + // This is controlled by the legacy_stderr_threshold_behavior flag. 
+ if l.legacyStderrThresholdBehavior { + // Legacy behavior: always write to stderr, ignore stderrthreshold + os.Stderr.Write(data) + } else { + // New behavior: honor stderrthreshold even when logtostderr=true + if s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + } } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { + // Write to stderr if any of these conditions are met: + // - alsoToStderr is set (legacy behavior) + // - alsologtostderr is set and severity meets alsologtostderrthreshold + // - alsologtostderr is not set and severity meets stderrThreshold + if alsoToStderr || + (l.alsoToStderr && s >= l.alsologtostderrthreshold.get()) || + (!l.alsoToStderr && s >= l.stderrThreshold.get()) { os.Stderr.Write(data) } diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go index efec96fd45..6204c7bb43 100644 --- a/vendor/k8s.io/klog/v2/klogr.go +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -53,7 +53,7 @@ func (l *klogger) Init(info logr.RuntimeInfo) { } func (l *klogger) Info(level int, msg string, kvList ...interface{}) { - merged := serialize.MergeKVs(l.values, kvList) + merged := serialize.WithValues(l.values, kvList) // Skip this function. VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) } @@ -63,7 +63,7 @@ func (l *klogger) Enabled(level int) bool { } func (l *klogger) Error(err error, msg string, kvList ...interface{}) { - merged := serialize.MergeKVs(l.values, kvList) + merged := serialize.WithValues(l.values, kvList) ErrorSDepth(l.callDepth+1, err, msg, merged...) } diff --git a/vendor/k8s.io/klog/v2/klogr_slog.go b/vendor/k8s.io/klog/v2/klogr_slog.go index c77d7baafa..901e28dd39 100644 --- a/vendor/k8s.io/klog/v2/klogr_slog.go +++ b/vendor/k8s.io/klog/v2/klogr_slog.go @@ -63,12 +63,17 @@ func slogOutput(file string, line int, now time.Time, err error, s severity.Seve } // See printS. 
+ qMsg := make([]byte, 0, 1024) + qMsg = strconv.AppendQuote(qMsg, msg) + b := buffer.GetBuffer() - b.WriteString(strconv.Quote(msg)) + b.Write(qMsg) + + var errKV []interface{} if err != nil { - serialize.KVListFormat(&b.Buffer, "err", err) + errKV = []interface{}{"err", err} } - serialize.KVListFormat(&b.Buffer, kvList...) + serialize.FormatKVs(&b.Buffer, errKV, kvList) // See print + header. buf := logging.formatHeader(s, file, line, now) diff --git a/vendor/modules.txt b/vendor/modules.txt index 03783aa337..0c87724f35 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -213,6 +213,10 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib +# github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.86.2 +## explicit; go 1.24.0 +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1 # github.com/prometheus/client_golang v1.23.2 ## explicit; go 1.23.0 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil @@ -261,7 +265,7 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# go.yaml.in/yaml/v2 v2.4.3 +# go.yaml.in/yaml/v2 v2.4.4 ## explicit; go 1.15 go.yaml.in/yaml/v2 # go.yaml.in/yaml/v3 v3.0.4 @@ -901,8 +905,8 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/klog/v2 v2.130.1 -## explicit; go 1.18 +# k8s.io/klog/v2 v2.140.0 +## explicit; go 1.21 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer k8s.io/klog/v2/internal/clock @@ -910,7 +914,7 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler -# k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4 +# k8s.io/kube-openapi v0.0.0-20260317180543-43fb72c5454a ## 
explicit; go 1.23.0 k8s.io/kube-openapi/pkg/cached k8s.io/kube-openapi/pkg/common @@ -991,7 +995,7 @@ sigs.k8s.io/json/internal/golang/encoding/json ## explicit; go 1.18 sigs.k8s.io/randfill sigs.k8s.io/randfill/bytesource -# sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 +# sigs.k8s.io/structured-merge-diff/v6 v6.3.2 ## explicit; go 1.23 sigs.k8s.io/structured-merge-diff/v6/fieldpath sigs.k8s.io/structured-merge-diff/v6/merge From 7ac20873e4806570e581f97623a9349fb527f901 Mon Sep 17 00:00:00 2001 From: Anatolii Bazko Date: Mon, 11 May 2026 17:50:09 +0200 Subject: [PATCH 2/9] fixup Signed-off-by: Anatolii Bazko --- .../che-operator.clusterserviceversion.yaml | 23 +++++++++++++++---- pkg/deploy/metrics/metrics.go | 20 ++++++++-------- 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml b/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml index 799632954f..c433505f22 100644 --- a/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml +++ b/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024 Red Hat, Inc. +# Copyright (c) 2019-2026 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ @@ -86,7 +86,7 @@ metadata: categories: Developer Tools certified: "false" containerImage: quay.io/eclipse/che-operator:next - createdAt: "2026-04-30T14:29:49Z" + createdAt: "2026-05-11T15:47:30Z" description: A Kube-native development solution that delivers portable and collaborative developer workspaces. 
features.operators.openshift.io/cnf: "false" @@ -108,7 +108,7 @@ metadata: operatorframework.io/arch.amd64: supported operatorframework.io/arch.arm64: supported operatorframework.io/os.linux: supported - name: eclipse-che.v7.117.0-975.next + name: eclipse-che.v7.118.0-976.next namespace: placeholder spec: apiservicedefinitions: {} @@ -907,8 +907,21 @@ spec: resources: - servicemonitors verbs: - - get - create + - delete + - get + - update + - patch + - watch + - list + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - watch + - list serviceAccountName: che-operator deployments: - label: @@ -1144,7 +1157,7 @@ spec: name: gateway-authorization-sidecar-k8s - image: quay.io/che-incubator/header-rewrite-proxy:latest name: gateway-header-sidecar - version: 7.117.0-975.next + version: 7.118.0-976.next webhookdefinitions: - admissionReviewVersions: - v1 diff --git a/pkg/deploy/metrics/metrics.go b/pkg/deploy/metrics/metrics.go index 9db8d04e8e..205b7c36c0 100644 --- a/pkg/deploy/metrics/metrics.go +++ b/pkg/deploy/metrics/metrics.go @@ -39,7 +39,8 @@ const ( ) var ( - log = ctrl.Log.WithName("metrics") + log = ctrl.Log.WithName("metrics") + isAbandonedResourcesDeleted = false ) type PrometheusResourceProvider interface { @@ -76,8 +77,13 @@ func (r *MetricsReconciler) Reconcile(ctx *chetypes.DeployContext) (reconcile.Re } } - if err := deleteAbandonedResources(ctx); err != nil { - return reconcile.Result{}, false, err + if !isAbandonedResourcesDeleted { + if err := deleteAbandonedResources(ctx); err != nil { + return reconcile.Result{}, false, err + } + + // We don't need to delete them on every reconcile loop + isAbandonedResourcesDeleted = true } return reconcile.Result{}, true, nil @@ -281,13 +287,7 @@ func deleteAbandonedResources(ctx *chetypes.DeployContext) error { syncObject.Object, ) if err != nil { - log.Error( - err, - "Failed to delete resource", - "kind", - syncObject.Object.GetObjectKind().GroupVersionKind(), - syncObject.Key, - ) + return err 
} } From 2f1a8289e5ff018a34d9408f48c7b481a98625f2 Mon Sep 17 00:00:00 2001 From: Anatolii Bazko Date: Wed, 13 May 2026 09:31:58 +0200 Subject: [PATCH 3/9] fixes Signed-off-by: Anatolii Bazko --- .../che-operator.clusterserviceversion.yaml | 10 ++-- config/rbac/cluster_role.yaml | 4 -- deploy/deployment/kubernetes/combined.yaml | 4 -- .../objects/che-operator.ClusterRole.yaml | 4 -- deploy/deployment/openshift/combined.yaml | 4 -- .../objects/che-operator.ClusterRole.yaml | 4 -- .../templates/che-operator.ClusterRole.yaml | 4 -- .../metrics/dwo_prometheus_resources.go | 2 +- pkg/deploy/metrics/metrics.go | 9 +++- pkg/deploy/metrics/metrics_test.go | 49 +++++++++++++++++++ 10 files changed, 60 insertions(+), 34 deletions(-) diff --git a/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml b/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml index c433505f22..0c4aef612e 100644 --- a/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml +++ b/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml @@ -86,7 +86,7 @@ metadata: categories: Developer Tools certified: "false" containerImage: quay.io/eclipse/che-operator:next - createdAt: "2026-05-11T15:47:30Z" + createdAt: "2026-05-13T07:31:33Z" description: A Kube-native development solution that delivers portable and collaborative developer workspaces. 
features.operators.openshift.io/cnf: "false" @@ -108,7 +108,7 @@ metadata: operatorframework.io/arch.amd64: supported operatorframework.io/arch.arm64: supported operatorframework.io/os.linux: supported - name: eclipse-che.v7.118.0-976.next + name: eclipse-che.v7.118.0-977.next namespace: placeholder spec: apiservicedefinitions: {} @@ -907,11 +907,7 @@ spec: resources: - servicemonitors verbs: - - create - - delete - get - - update - - patch - watch - list - apiGroups: @@ -1157,7 +1153,7 @@ spec: name: gateway-authorization-sidecar-k8s - image: quay.io/che-incubator/header-rewrite-proxy:latest name: gateway-header-sidecar - version: 7.118.0-976.next + version: 7.118.0-977.next webhookdefinitions: - admissionReviewVersions: - v1 diff --git a/config/rbac/cluster_role.yaml b/config/rbac/cluster_role.yaml index 0136b737bb..dce4acb7bd 100644 --- a/config/rbac/cluster_role.yaml +++ b/config/rbac/cluster_role.yaml @@ -384,11 +384,7 @@ rules: resources: - servicemonitors verbs: - - create - - delete - get - - update - - patch - watch - list - apiGroups: diff --git a/deploy/deployment/kubernetes/combined.yaml b/deploy/deployment/kubernetes/combined.yaml index c4cbb287b9..099062922a 100644 --- a/deploy/deployment/kubernetes/combined.yaml +++ b/deploy/deployment/kubernetes/combined.yaml @@ -23180,11 +23180,7 @@ rules: resources: - servicemonitors verbs: - - create - - delete - get - - update - - patch - watch - list - apiGroups: diff --git a/deploy/deployment/kubernetes/objects/che-operator.ClusterRole.yaml b/deploy/deployment/kubernetes/objects/che-operator.ClusterRole.yaml index 074a0df6d1..0f34936b44 100644 --- a/deploy/deployment/kubernetes/objects/che-operator.ClusterRole.yaml +++ b/deploy/deployment/kubernetes/objects/che-operator.ClusterRole.yaml @@ -384,11 +384,7 @@ rules: resources: - servicemonitors verbs: - - create - - delete - get - - update - - patch - watch - list - apiGroups: diff --git a/deploy/deployment/openshift/combined.yaml 
b/deploy/deployment/openshift/combined.yaml index 355fbe73be..8c11417299 100644 --- a/deploy/deployment/openshift/combined.yaml +++ b/deploy/deployment/openshift/combined.yaml @@ -23180,11 +23180,7 @@ rules: resources: - servicemonitors verbs: - - create - - delete - get - - update - - patch - watch - list - apiGroups: diff --git a/deploy/deployment/openshift/objects/che-operator.ClusterRole.yaml b/deploy/deployment/openshift/objects/che-operator.ClusterRole.yaml index 074a0df6d1..0f34936b44 100644 --- a/deploy/deployment/openshift/objects/che-operator.ClusterRole.yaml +++ b/deploy/deployment/openshift/objects/che-operator.ClusterRole.yaml @@ -384,11 +384,7 @@ rules: resources: - servicemonitors verbs: - - create - - delete - get - - update - - patch - watch - list - apiGroups: diff --git a/helmcharts/next/templates/che-operator.ClusterRole.yaml b/helmcharts/next/templates/che-operator.ClusterRole.yaml index 074a0df6d1..0f34936b44 100644 --- a/helmcharts/next/templates/che-operator.ClusterRole.yaml +++ b/helmcharts/next/templates/che-operator.ClusterRole.yaml @@ -384,11 +384,7 @@ rules: resources: - servicemonitors verbs: - - create - - delete - get - - update - - patch - watch - list - apiGroups: diff --git a/pkg/deploy/metrics/dwo_prometheus_resources.go b/pkg/deploy/metrics/dwo_prometheus_resources.go index b0aede83ee..4c054505a6 100644 --- a/pkg/deploy/metrics/dwo_prometheus_resources.go +++ b/pkg/deploy/metrics/dwo_prometheus_resources.go @@ -84,7 +84,7 @@ func (r *DWOPrometheusResourceProvider) GetPrometheusRole(ctx *chetypes.DeployCo }, ObjectMeta: metav1.ObjectMeta{ Name: dwoPrometheusRoleName, - Namespace: ctx.CheCluster.Namespace, + Namespace: namespace, Labels: deploy.GetLabels(constants.MetricsComponentName), }, Rules: []rbacv1.PolicyRule{ diff --git a/pkg/deploy/metrics/metrics.go b/pkg/deploy/metrics/metrics.go index 205b7c36c0..388860f107 100644 --- a/pkg/deploy/metrics/metrics.go +++ b/pkg/deploy/metrics/metrics.go @@ -62,8 +62,10 @@ func (r 
*MetricsReconciler) Reconcile(ctx *chetypes.DeployContext) (reconcile.Re return reconcile.Result{}, false, err } - if err := addOpenShiftMonitoringLabel(ctx); err != nil { - return reconcile.Result{}, false, err + if infrastructure.IsOpenShift() { + if err := addOpenShiftMonitoringLabel(ctx); err != nil { + return reconcile.Result{}, false, err + } } isCheServerMetricsEnabled := ctx.CheCluster.Spec.Components.Metrics.Enable @@ -90,6 +92,9 @@ func (r *MetricsReconciler) Reconcile(ctx *chetypes.DeployContext) (reconcile.Re } func (r *MetricsReconciler) Finalize(ctx *chetypes.DeployContext) bool { + // Do not remove the openshift.io/cluster-monitoring label, + // as it may have already existed. + cheServerPrometheusResources, err := collectPrometheusResources(ctx, &CheServerPrometheusResourceProvider{}) if err != nil { log.Error(err, "Failed to collect Prometheus resources") diff --git a/pkg/deploy/metrics/metrics_test.go b/pkg/deploy/metrics/metrics_test.go index d262716f43..f1c29a3d11 100644 --- a/pkg/deploy/metrics/metrics_test.go +++ b/pkg/deploy/metrics/metrics_test.go @@ -114,3 +114,52 @@ func TestReconcileMetricsDisabled(t *testing.T) { assert.False(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: cheServerRoleBindingName, Namespace: "eclipse-che"}, &rbacv1.RoleBinding{})) assert.False(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: cheServerServiceMonitorName, Namespace: "eclipse-che"}, &monitoringv1.ServiceMonitor{})) } + +func TestFinalizeMetrics(t *testing.T) { + cheCluster := &chev2.CheCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "eclipse-che", + Name: "eclipse-che", + }, + Spec: chev2.CheClusterSpec{ + Components: chev2.CheClusterComponents{ + Metrics: chev2.ServerMetrics{Enable: true}, + }, + }, + } + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "eclipse-che", + }, + } + + ctx := test.NewCtxBuilder().WithCheCluster(cheCluster).WithObjects(namespace).Build() + + 
reconciler := NewMetricsReconciler() + test.EnsureReconcile(t, ctx, reconciler.Reconcile) + + cheFlavor := defaults.GetCheFlavor() + cheServerRoleName := fmt.Sprintf(cheServerPrometheusRoleNameTemplate, cheFlavor) + cheServerRoleBindingName := fmt.Sprintf(cheServerPrometheusRoleBindingNameTemplate, cheFlavor) + cheServerServiceMonitorName := fmt.Sprintf(cheServerServiceMonitorNameTemplate, cheFlavor) + + // Verify resources exist before finalize + assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoPrometheusRoleName, Namespace: "openshift-operators"}, &rbacv1.Role{})) + assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoPrometheusRoleBindingName, Namespace: "openshift-operators"}, &rbacv1.RoleBinding{})) + assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoServiceMonitorName, Namespace: "eclipse-che"}, &monitoringv1.ServiceMonitor{})) + assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: cheServerRoleName, Namespace: "eclipse-che"}, &rbacv1.Role{})) + assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: cheServerRoleBindingName, Namespace: "eclipse-che"}, &rbacv1.RoleBinding{})) + assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: cheServerServiceMonitorName, Namespace: "eclipse-che"}, &monitoringv1.ServiceMonitor{})) + + done := reconciler.Finalize(ctx) + assert.True(t, done) + + // Verify all resources are deleted + assert.False(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoPrometheusRoleName, Namespace: "openshift-operators"}, &rbacv1.Role{})) + assert.False(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoPrometheusRoleBindingName, Namespace: "openshift-operators"}, &rbacv1.RoleBinding{})) + assert.False(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoServiceMonitorName, 
Namespace: "eclipse-che"}, &monitoringv1.ServiceMonitor{})) + assert.False(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: cheServerRoleName, Namespace: "eclipse-che"}, &rbacv1.Role{})) + assert.False(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: cheServerRoleBindingName, Namespace: "eclipse-che"}, &rbacv1.RoleBinding{})) + assert.False(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: cheServerServiceMonitorName, Namespace: "eclipse-che"}, &monitoringv1.ServiceMonitor{})) +} From 8bff4535431670c8e300c43b5003d8674238e408 Mon Sep 17 00:00:00 2001 From: Anatolii Bazko Date: Wed, 13 May 2026 09:34:58 +0200 Subject: [PATCH 4/9] fixes Signed-off-by: Anatolii Bazko --- pkg/deploy/metrics/metrics_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/deploy/metrics/metrics_test.go b/pkg/deploy/metrics/metrics_test.go index f1c29a3d11..34d9ced320 100644 --- a/pkg/deploy/metrics/metrics_test.go +++ b/pkg/deploy/metrics/metrics_test.go @@ -56,7 +56,7 @@ func TestReconcileMetrics(t *testing.T) { cheFlavor := defaults.GetCheFlavor() // DWO resources - assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoPrometheusRoleName, Namespace: "eclipse-che"}, &rbacv1.Role{})) + assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoPrometheusRoleName, Namespace: "openshift-operators"}, &rbacv1.Role{})) assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoPrometheusRoleBindingName, Namespace: "openshift-operators"}, &rbacv1.RoleBinding{})) assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoServiceMonitorName, Namespace: "eclipse-che"}, &monitoringv1.ServiceMonitor{})) @@ -102,7 +102,7 @@ func TestReconcileMetricsDisabled(t *testing.T) { cheFlavor := defaults.GetCheFlavor() // DWO resources should still exist - assert.True(t, 
test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoPrometheusRoleName, Namespace: "eclipse-che"}, &rbacv1.Role{})) + assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoPrometheusRoleName, Namespace: "openshift-operators"}, &rbacv1.Role{})) assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoPrometheusRoleBindingName, Namespace: "openshift-operators"}, &rbacv1.RoleBinding{})) assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoServiceMonitorName, Namespace: "eclipse-che"}, &monitoringv1.ServiceMonitor{})) From 7804a40b8cd03d510f45c7d491886b91f95595c0 Mon Sep 17 00:00:00 2001 From: Anatolii Bazko Date: Wed, 13 May 2026 09:55:55 +0200 Subject: [PATCH 5/9] fixes Signed-off-by: Anatolii Bazko --- pkg/deploy/metrics/metrics.go | 47 ++++------------ pkg/deploy/metrics/metrics_test.go | 90 ++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+), 35 deletions(-) diff --git a/pkg/deploy/metrics/metrics.go b/pkg/deploy/metrics/metrics.go index 388860f107..5bfe6cda7f 100644 --- a/pkg/deploy/metrics/metrics.go +++ b/pkg/deploy/metrics/metrics.go @@ -19,6 +19,7 @@ import ( "github.com/eclipse-che/che-operator/pkg/common/infrastructure" defaults "github.com/eclipse-che/che-operator/pkg/common/operator-defaults" "github.com/eclipse-che/che-operator/pkg/common/reconciler" + "github.com/pkg/errors" rbacv1 "k8s.io/api/rbac/v1" ctrl "sigs.k8s.io/controller-runtime" @@ -141,7 +142,7 @@ func syncResources(ctx *chetypes.DeployContext, prometheusResourceProvider Prome DiffOpts: resource.DiffOpts, }, ); err != nil { - return err + return errors.Wrap(err, "Failed to sync resource") } } @@ -163,7 +164,7 @@ func deleteResources(ctx *chetypes.DeployContext, prometheusResourceProvider Pro }, resource.Object, ); err != nil { - return err + return errors.Wrap(err, "Failed to delete resource") } } @@ -229,59 +230,35 @@ func deleteAbandonedResources(ctx 
*chetypes.DeployContext) error { syncObjects := []k8sclient.SyncTarget{ { Object: &monitoringv1.ServiceMonitor{}, - Key: types.NamespacedName{ - Name: "che-host", - Namespace: ctx.CheCluster.Namespace, - }, + Key: types.NamespacedName{Name: "che-host", Namespace: ctx.CheCluster.Namespace}, }, { Object: &monitoringv1.ServiceMonitor{}, - Key: types.NamespacedName{ - Name: "devworkspace-controller", - Namespace: ctx.CheCluster.Namespace, - }, + Key: types.NamespacedName{Name: "devworkspace-controller", Namespace: ctx.CheCluster.Namespace}, }, { Object: &monitoringv1.ServiceMonitor{}, - Key: types.NamespacedName{ - Name: "openshift-devspaces-metrics-exporter", - Namespace: operatorNamespace, - }, + Key: types.NamespacedName{Name: "openshift-devspaces-metrics-exporter", Namespace: operatorNamespace}, }, { Object: &rbacv1.Role{}, - Key: types.NamespacedName{ - Name: "prometheus-k8s", - Namespace: ctx.CheCluster.Namespace, - }, + Key: types.NamespacedName{Name: "prometheus-k8s", Namespace: ctx.CheCluster.Namespace}, }, { Object: &rbacv1.Role{}, - Key: types.NamespacedName{ - Name: "prometheus-k8s", - Namespace: operatorNamespace, - }, + Key: types.NamespacedName{Name: "prometheus-k8s", Namespace: operatorNamespace}, }, { Object: &rbacv1.RoleBinding{}, - Key: types.NamespacedName{ - Name: fmt.Sprintf("view-%s-openshift-monitoring-prometheus-k8s", defaults.GetCheFlavor()), - Namespace: ctx.CheCluster.Namespace, - }, + Key: types.NamespacedName{Name: fmt.Sprintf("view-%s-openshift-monitoring-prometheus-k8s", defaults.GetCheFlavor()), Namespace: ctx.CheCluster.Namespace}, }, { Object: &rbacv1.RoleBinding{}, - Key: types.NamespacedName{ - Name: fmt.Sprintf("view-%s-openshift-monitoring-prometheus-k8s", defaults.GetCheFlavor()), - Namespace: operatorNamespace, - }, + Key: types.NamespacedName{Name: fmt.Sprintf("view-%s-openshift-monitoring-prometheus-k8s", defaults.GetCheFlavor()), Namespace: operatorNamespace}, }, { Object: &rbacv1.RoleBinding{}, - Key: types.NamespacedName{ - 
Name: "view-openshift-monitoring-prometheus-k8s", - Namespace: operatorNamespace, - }, + Key: types.NamespacedName{Name: "view-openshift-monitoring-prometheus-k8s", Namespace: operatorNamespace}, }, } @@ -292,7 +269,7 @@ func deleteAbandonedResources(ctx *chetypes.DeployContext) error { syncObject.Object, ) if err != nil { - return err + return errors.Wrap(err, "Failed to delete resource") } } diff --git a/pkg/deploy/metrics/metrics_test.go b/pkg/deploy/metrics/metrics_test.go index 34d9ced320..597bd2dd69 100644 --- a/pkg/deploy/metrics/metrics_test.go +++ b/pkg/deploy/metrics/metrics_test.go @@ -115,6 +115,96 @@ func TestReconcileMetricsDisabled(t *testing.T) { assert.False(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: cheServerServiceMonitorName, Namespace: "eclipse-che"}, &monitoringv1.ServiceMonitor{})) } +func TestServiceMonitorIntervalPreservation(t *testing.T) { + cheFlavor := defaults.GetCheFlavor() + cheServerServiceMonitorName := fmt.Sprintf(cheServerServiceMonitorNameTemplate, cheFlavor) + + existingServiceMonitor := &monitoringv1.ServiceMonitor{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "eclipse-che", + Name: cheServerServiceMonitorName, + }, + Spec: monitoringv1.ServiceMonitorSpec{ + Endpoints: []monitoringv1.Endpoint{ + { + Interval: "30s", + Scheme: "http", + Port: metricsPortName, + }, + }, + }, + } + + cheCluster := &chev2.CheCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "eclipse-che", + Name: "eclipse-che", + }, + Spec: chev2.CheClusterSpec{ + Components: chev2.CheClusterComponents{ + Metrics: chev2.ServerMetrics{Enable: true}, + }, + }, + } + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "eclipse-che", + }, + } + + ctx := test.NewCtxBuilder().WithCheCluster(cheCluster).WithObjects(namespace, existingServiceMonitor).Build() + + reconciler := NewMetricsReconciler() + test.EnsureReconcile(t, ctx, reconciler.Reconcile) + + sm := &monitoringv1.ServiceMonitor{} + err := 
ctx.ClusterAPI.Client.Get(context.TODO(), types.NamespacedName{Name: cheServerServiceMonitorName, Namespace: "eclipse-che"}, sm) + assert.NoError(t, err) + assert.Equal(t, monitoringv1.Duration("30s"), sm.Spec.Endpoints[0].Interval) +} + +func TestReconcileMetricsIdempotent(t *testing.T) { + cheCluster := &chev2.CheCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "eclipse-che", + Name: "eclipse-che", + }, + Spec: chev2.CheClusterSpec{ + Components: chev2.CheClusterComponents{ + Metrics: chev2.ServerMetrics{Enable: true}, + }, + }, + } + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "eclipse-che", + }, + } + + ctx := test.NewCtxBuilder().WithCheCluster(cheCluster).WithObjects(namespace).Build() + + reconciler := NewMetricsReconciler() + test.EnsureReconcile(t, ctx, reconciler.Reconcile) + test.EnsureReconcile(t, ctx, reconciler.Reconcile) + + cheFlavor := defaults.GetCheFlavor() + + // DWO resources + assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoPrometheusRoleName, Namespace: "openshift-operators"}, &rbacv1.Role{})) + assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoPrometheusRoleBindingName, Namespace: "openshift-operators"}, &rbacv1.RoleBinding{})) + assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: dwoServiceMonitorName, Namespace: "eclipse-che"}, &monitoringv1.ServiceMonitor{})) + + // Che server resources + cheServerRoleName := fmt.Sprintf(cheServerPrometheusRoleNameTemplate, cheFlavor) + cheServerRoleBindingName := fmt.Sprintf(cheServerPrometheusRoleBindingNameTemplate, cheFlavor) + cheServerServiceMonitorName := fmt.Sprintf(cheServerServiceMonitorNameTemplate, cheFlavor) + assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: cheServerRoleName, Namespace: "eclipse-che"}, &rbacv1.Role{})) + assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: 
cheServerRoleBindingName, Namespace: "eclipse-che"}, &rbacv1.RoleBinding{})) + assert.True(t, test.IsObjectExists(ctx.ClusterAPI.Client, types.NamespacedName{Name: cheServerServiceMonitorName, Namespace: "eclipse-che"}, &monitoringv1.ServiceMonitor{})) +} + func TestFinalizeMetrics(t *testing.T) { cheCluster := &chev2.CheCluster{ ObjectMeta: metav1.ObjectMeta{ From aededc3b3711126a092548fdc21712c40fda5373 Mon Sep 17 00:00:00 2001 From: Anatolii Bazko Date: Wed, 13 May 2026 10:00:12 +0200 Subject: [PATCH 6/9] fixes Signed-off-by: Anatolii Bazko --- .../manifests/che-operator.clusterserviceversion.yaml | 7 ++++--- config/rbac/cluster_role.yaml | 1 + deploy/deployment/kubernetes/combined.yaml | 1 + .../kubernetes/objects/che-operator.ClusterRole.yaml | 1 + deploy/deployment/openshift/combined.yaml | 1 + .../openshift/objects/che-operator.ClusterRole.yaml | 1 + helmcharts/next/templates/che-operator.ClusterRole.yaml | 1 + 7 files changed, 10 insertions(+), 3 deletions(-) diff --git a/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml b/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml index 0c4aef612e..58328651cf 100644 --- a/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml +++ b/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml @@ -86,7 +86,7 @@ metadata: categories: Developer Tools certified: "false" containerImage: quay.io/eclipse/che-operator:next - createdAt: "2026-05-13T07:31:33Z" + createdAt: "2026-05-13T08:00:02Z" description: A Kube-native development solution that delivers portable and collaborative developer workspaces. 
features.operators.openshift.io/cnf: "false" @@ -108,7 +108,7 @@ metadata: operatorframework.io/arch.amd64: supported operatorframework.io/arch.arm64: supported operatorframework.io/os.linux: supported - name: eclipse-che.v7.118.0-977.next + name: eclipse-che.v7.118.0-978.next namespace: placeholder spec: apiservicedefinitions: {} @@ -910,6 +910,7 @@ spec: - get - watch - list + - create - apiGroups: - "" resources: @@ -1153,7 +1154,7 @@ spec: name: gateway-authorization-sidecar-k8s - image: quay.io/che-incubator/header-rewrite-proxy:latest name: gateway-header-sidecar - version: 7.118.0-977.next + version: 7.118.0-978.next webhookdefinitions: - admissionReviewVersions: - v1 diff --git a/config/rbac/cluster_role.yaml b/config/rbac/cluster_role.yaml index dce4acb7bd..4826bf4c43 100644 --- a/config/rbac/cluster_role.yaml +++ b/config/rbac/cluster_role.yaml @@ -387,6 +387,7 @@ rules: - get - watch - list + - create - apiGroups: - '' resources: diff --git a/deploy/deployment/kubernetes/combined.yaml b/deploy/deployment/kubernetes/combined.yaml index 099062922a..3c3a0dee48 100644 --- a/deploy/deployment/kubernetes/combined.yaml +++ b/deploy/deployment/kubernetes/combined.yaml @@ -23183,6 +23183,7 @@ rules: - get - watch - list + - create - apiGroups: - "" resources: diff --git a/deploy/deployment/kubernetes/objects/che-operator.ClusterRole.yaml b/deploy/deployment/kubernetes/objects/che-operator.ClusterRole.yaml index 0f34936b44..7d427f1b17 100644 --- a/deploy/deployment/kubernetes/objects/che-operator.ClusterRole.yaml +++ b/deploy/deployment/kubernetes/objects/che-operator.ClusterRole.yaml @@ -387,6 +387,7 @@ rules: - get - watch - list + - create - apiGroups: - "" resources: diff --git a/deploy/deployment/openshift/combined.yaml b/deploy/deployment/openshift/combined.yaml index 8c11417299..ddc79e82a3 100644 --- a/deploy/deployment/openshift/combined.yaml +++ b/deploy/deployment/openshift/combined.yaml @@ -23183,6 +23183,7 @@ rules: - get - watch - list + - create - 
apiGroups: - "" resources: diff --git a/deploy/deployment/openshift/objects/che-operator.ClusterRole.yaml b/deploy/deployment/openshift/objects/che-operator.ClusterRole.yaml index 0f34936b44..7d427f1b17 100644 --- a/deploy/deployment/openshift/objects/che-operator.ClusterRole.yaml +++ b/deploy/deployment/openshift/objects/che-operator.ClusterRole.yaml @@ -387,6 +387,7 @@ rules: - get - watch - list + - create - apiGroups: - "" resources: diff --git a/helmcharts/next/templates/che-operator.ClusterRole.yaml b/helmcharts/next/templates/che-operator.ClusterRole.yaml index 0f34936b44..7d427f1b17 100644 --- a/helmcharts/next/templates/che-operator.ClusterRole.yaml +++ b/helmcharts/next/templates/che-operator.ClusterRole.yaml @@ -387,6 +387,7 @@ rules: - get - watch - list + - create - apiGroups: - "" resources: From 1fd7729073ce84110be7f48045315c4ff821b17c Mon Sep 17 00:00:00 2001 From: Anatolii Bazko Date: Wed, 13 May 2026 10:43:35 +0200 Subject: [PATCH 7/9] fixes Signed-off-by: Anatolii Bazko --- .../manifests/che-operator.clusterserviceversion.yaml | 7 ++++--- config/rbac/cluster_role.yaml | 1 + deploy/deployment/kubernetes/combined.yaml | 1 + .../kubernetes/objects/che-operator.ClusterRole.yaml | 1 + deploy/deployment/openshift/combined.yaml | 1 + .../openshift/objects/che-operator.ClusterRole.yaml | 1 + helmcharts/next/templates/che-operator.ClusterRole.yaml | 1 + pkg/common/operator-defaults/defaults.go | 4 ++-- pkg/deploy/metrics/metrics.go | 5 ++++- 9 files changed, 16 insertions(+), 6 deletions(-) diff --git a/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml b/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml index 58328651cf..2582166b4d 100644 --- a/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml +++ b/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml @@ -86,7 +86,7 @@ metadata: categories: Developer Tools certified: "false" containerImage: 
quay.io/eclipse/che-operator:next - createdAt: "2026-05-13T08:00:02Z" + createdAt: "2026-05-13T08:40:26Z" description: A Kube-native development solution that delivers portable and collaborative developer workspaces. features.operators.openshift.io/cnf: "false" @@ -108,7 +108,7 @@ metadata: operatorframework.io/arch.amd64: supported operatorframework.io/arch.arm64: supported operatorframework.io/os.linux: supported - name: eclipse-che.v7.118.0-978.next + name: eclipse-che.v7.118.0-979.next namespace: placeholder spec: apiservicedefinitions: {} @@ -911,6 +911,7 @@ spec: - watch - list - create + - delete - apiGroups: - "" resources: @@ -1154,7 +1155,7 @@ spec: name: gateway-authorization-sidecar-k8s - image: quay.io/che-incubator/header-rewrite-proxy:latest name: gateway-header-sidecar - version: 7.118.0-978.next + version: 7.118.0-979.next webhookdefinitions: - admissionReviewVersions: - v1 diff --git a/config/rbac/cluster_role.yaml b/config/rbac/cluster_role.yaml index 4826bf4c43..02af0240f3 100644 --- a/config/rbac/cluster_role.yaml +++ b/config/rbac/cluster_role.yaml @@ -388,6 +388,7 @@ rules: - watch - list - create + - delete - apiGroups: - '' resources: diff --git a/deploy/deployment/kubernetes/combined.yaml b/deploy/deployment/kubernetes/combined.yaml index 3c3a0dee48..3e057308ed 100644 --- a/deploy/deployment/kubernetes/combined.yaml +++ b/deploy/deployment/kubernetes/combined.yaml @@ -23184,6 +23184,7 @@ rules: - watch - list - create + - delete - apiGroups: - "" resources: diff --git a/deploy/deployment/kubernetes/objects/che-operator.ClusterRole.yaml b/deploy/deployment/kubernetes/objects/che-operator.ClusterRole.yaml index 7d427f1b17..1bb6d3e686 100644 --- a/deploy/deployment/kubernetes/objects/che-operator.ClusterRole.yaml +++ b/deploy/deployment/kubernetes/objects/che-operator.ClusterRole.yaml @@ -388,6 +388,7 @@ rules: - watch - list - create + - delete - apiGroups: - "" resources: diff --git a/deploy/deployment/openshift/combined.yaml 
b/deploy/deployment/openshift/combined.yaml index ddc79e82a3..8c7fb63fbc 100644 --- a/deploy/deployment/openshift/combined.yaml +++ b/deploy/deployment/openshift/combined.yaml @@ -23184,6 +23184,7 @@ rules: - watch - list - create + - delete - apiGroups: - "" resources: diff --git a/deploy/deployment/openshift/objects/che-operator.ClusterRole.yaml b/deploy/deployment/openshift/objects/che-operator.ClusterRole.yaml index 7d427f1b17..1bb6d3e686 100644 --- a/deploy/deployment/openshift/objects/che-operator.ClusterRole.yaml +++ b/deploy/deployment/openshift/objects/che-operator.ClusterRole.yaml @@ -388,6 +388,7 @@ rules: - watch - list - create + - delete - apiGroups: - "" resources: diff --git a/helmcharts/next/templates/che-operator.ClusterRole.yaml b/helmcharts/next/templates/che-operator.ClusterRole.yaml index 7d427f1b17..1bb6d3e686 100644 --- a/helmcharts/next/templates/che-operator.ClusterRole.yaml +++ b/helmcharts/next/templates/che-operator.ClusterRole.yaml @@ -388,6 +388,7 @@ rules: - watch - list - create + - delete - apiGroups: - "" resources: diff --git a/pkg/common/operator-defaults/defaults.go b/pkg/common/operator-defaults/defaults.go index 1f5b50ce0c..019d3100ca 100644 --- a/pkg/common/operator-defaults/defaults.go +++ b/pkg/common/operator-defaults/defaults.go @@ -101,8 +101,8 @@ func Initialize() { defaultGatewayOpenShiftAuthenticationSidecarImage = ensureEnv(util.GetArchitectureDependentEnvName("RELATED_IMAGE_gateway_authentication_sidecar")) defaultGatewayOpenShiftAuthorizationSidecarImage = ensureEnv(util.GetArchitectureDependentEnvName("RELATED_IMAGE_gateway_authorization_sidecar")) - defaultGatewayKubernetesAuthenticationSidecarImage = ensureEnv(util.GetArchitectureDependentEnvName("RELATED_IMAGE_gateway_authentication_sidecar_k8s")) - defaultGatewayKubernetesAuthorizationSidecarImage = ensureEnv(util.GetArchitectureDependentEnvName("RELATED_IMAGE_gateway_authorization_sidecar_k8s")) + //defaultGatewayKubernetesAuthenticationSidecarImage = 
ensureEnv(util.GetArchitectureDependentEnvName("RELATED_IMAGE_gateway_authentication_sidecar_k8s")) + //defaultGatewayKubernetesAuthorizationSidecarImage = ensureEnv(util.GetArchitectureDependentEnvName("RELATED_IMAGE_gateway_authorization_sidecar_k8s")) // Don't get some k8s specific env if !infrastructure.IsOpenShift() { diff --git a/pkg/deploy/metrics/metrics.go b/pkg/deploy/metrics/metrics.go index 5bfe6cda7f..7eeaa649df 100644 --- a/pkg/deploy/metrics/metrics.go +++ b/pkg/deploy/metrics/metrics.go @@ -80,12 +80,15 @@ func (r *MetricsReconciler) Reconcile(ctx *chetypes.DeployContext) (reconcile.Re } } + // It is safe to remove abandoned resources after reconciling new ones + // since resource names are different. if !isAbandonedResourcesDeleted { if err := deleteAbandonedResources(ctx); err != nil { return reconcile.Result{}, false, err } - // We don't need to delete them on every reconcile loop + // We don't need to delete them on every reconcile loop even + // though they can be recreated by external tools.
isAbandonedResourcesDeleted = true } From 7fa0d19a9a923813782a4cdacaa87aa35dc5bca2 Mon Sep 17 00:00:00 2001 From: Anatolii Bazko Date: Wed, 13 May 2026 11:09:06 +0200 Subject: [PATCH 8/9] fixes Signed-off-by: Anatolii Bazko --- pkg/common/operator-defaults/defaults.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/common/operator-defaults/defaults.go b/pkg/common/operator-defaults/defaults.go index 019d3100ca..1f5b50ce0c 100644 --- a/pkg/common/operator-defaults/defaults.go +++ b/pkg/common/operator-defaults/defaults.go @@ -101,8 +101,8 @@ func Initialize() { defaultGatewayOpenShiftAuthenticationSidecarImage = ensureEnv(util.GetArchitectureDependentEnvName("RELATED_IMAGE_gateway_authentication_sidecar")) defaultGatewayOpenShiftAuthorizationSidecarImage = ensureEnv(util.GetArchitectureDependentEnvName("RELATED_IMAGE_gateway_authorization_sidecar")) - //defaultGatewayKubernetesAuthenticationSidecarImage = ensureEnv(util.GetArchitectureDependentEnvName("RELATED_IMAGE_gateway_authentication_sidecar_k8s")) - //defaultGatewayKubernetesAuthorizationSidecarImage = ensureEnv(util.GetArchitectureDependentEnvName("RELATED_IMAGE_gateway_authorization_sidecar_k8s")) + defaultGatewayKubernetesAuthenticationSidecarImage = ensureEnv(util.GetArchitectureDependentEnvName("RELATED_IMAGE_gateway_authentication_sidecar_k8s")) + defaultGatewayKubernetesAuthorizationSidecarImage = ensureEnv(util.GetArchitectureDependentEnvName("RELATED_IMAGE_gateway_authorization_sidecar_k8s")) // Don't get some k8s specific env if !infrastructure.IsOpenShift() { From 2d1be20a3dc9fc2e0c8c3432a88d1314a2b9e659 Mon Sep 17 00:00:00 2001 From: Anatolii Bazko Date: Fri, 15 May 2026 12:59:38 +0200 Subject: [PATCH 9/9] update dev resources Signed-off-by: Anatolii Bazko --- .../manifests/che-operator.clusterserviceversion.yaml | 7 ++++--- config/rbac/cluster_role.yaml | 1 + deploy/deployment/kubernetes/combined.yaml | 1 + .../kubernetes/objects/che-operator.ClusterRole.yaml | 1 +
deploy/deployment/openshift/combined.yaml | 1 + .../openshift/objects/che-operator.ClusterRole.yaml | 1 + helmcharts/next/templates/che-operator.ClusterRole.yaml | 1 + 7 files changed, 10 insertions(+), 3 deletions(-) diff --git a/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml b/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml index 2582166b4d..a912f55a10 100644 --- a/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml +++ b/bundle/next/eclipse-che/manifests/che-operator.clusterserviceversion.yaml @@ -86,7 +86,7 @@ metadata: categories: Developer Tools certified: "false" containerImage: quay.io/eclipse/che-operator:next - createdAt: "2026-05-13T08:40:26Z" + createdAt: "2026-05-15T10:45:47Z" description: A Kube-native development solution that delivers portable and collaborative developer workspaces. features.operators.openshift.io/cnf: "false" @@ -108,7 +108,7 @@ metadata: operatorframework.io/arch.amd64: supported operatorframework.io/arch.arm64: supported operatorframework.io/os.linux: supported - name: eclipse-che.v7.118.0-979.next + name: eclipse-che.v7.118.0-980.next namespace: placeholder spec: apiservicedefinitions: {} @@ -644,6 +644,7 @@ spec: - create - update - watch + - patch - apiGroups: - apps resources: @@ -1155,7 +1156,7 @@ spec: name: gateway-authorization-sidecar-k8s - image: quay.io/che-incubator/header-rewrite-proxy:latest name: gateway-header-sidecar - version: 7.118.0-979.next + version: 7.118.0-980.next webhookdefinitions: - admissionReviewVersions: - v1 diff --git a/config/rbac/cluster_role.yaml b/config/rbac/cluster_role.yaml index 02af0240f3..42fa4e68ce 100644 --- a/config/rbac/cluster_role.yaml +++ b/config/rbac/cluster_role.yaml @@ -121,6 +121,7 @@ rules: - create - update - watch + - patch - apiGroups: - apps resources: diff --git a/deploy/deployment/kubernetes/combined.yaml b/deploy/deployment/kubernetes/combined.yaml index 3e057308ed..553b37d0a5 100644 --- 
a/deploy/deployment/kubernetes/combined.yaml +++ b/deploy/deployment/kubernetes/combined.yaml @@ -22917,6 +22917,7 @@ rules: - create - update - watch + - patch - apiGroups: - apps resources: diff --git a/deploy/deployment/kubernetes/objects/che-operator.ClusterRole.yaml b/deploy/deployment/kubernetes/objects/che-operator.ClusterRole.yaml index 1bb6d3e686..a1969ac2e0 100644 --- a/deploy/deployment/kubernetes/objects/che-operator.ClusterRole.yaml +++ b/deploy/deployment/kubernetes/objects/che-operator.ClusterRole.yaml @@ -121,6 +121,7 @@ rules: - create - update - watch + - patch - apiGroups: - apps resources: diff --git a/deploy/deployment/openshift/combined.yaml b/deploy/deployment/openshift/combined.yaml index 8c7fb63fbc..9563b9551c 100644 --- a/deploy/deployment/openshift/combined.yaml +++ b/deploy/deployment/openshift/combined.yaml @@ -22917,6 +22917,7 @@ rules: - create - update - watch + - patch - apiGroups: - apps resources: diff --git a/deploy/deployment/openshift/objects/che-operator.ClusterRole.yaml b/deploy/deployment/openshift/objects/che-operator.ClusterRole.yaml index 1bb6d3e686..a1969ac2e0 100644 --- a/deploy/deployment/openshift/objects/che-operator.ClusterRole.yaml +++ b/deploy/deployment/openshift/objects/che-operator.ClusterRole.yaml @@ -121,6 +121,7 @@ rules: - create - update - watch + - patch - apiGroups: - apps resources: diff --git a/helmcharts/next/templates/che-operator.ClusterRole.yaml b/helmcharts/next/templates/che-operator.ClusterRole.yaml index 1bb6d3e686..a1969ac2e0 100644 --- a/helmcharts/next/templates/che-operator.ClusterRole.yaml +++ b/helmcharts/next/templates/che-operator.ClusterRole.yaml @@ -121,6 +121,7 @@ rules: - create - update - watch + - patch - apiGroups: - apps resources: